[llvm] 342acfc - [AMDGPU] Turn off pass to rewrite partially used virtual superregisters after RenameIndependentSubregs pass with registers of minimal size.

Valery Pykhtin via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 7 03:05:39 PDT 2023


Author: Valery Pykhtin
Date: 2023-06-07T12:05:25+02:00
New Revision: 342acfc9bbc67d81e452b7edc29777bc920f5fd6

URL: https://github.com/llvm/llvm-project/commit/342acfc9bbc67d81e452b7edc29777bc920f5fd6
DIFF: https://github.com/llvm/llvm-project/commit/342acfc9bbc67d81e452b7edc29777bc920f5fd6.diff

LOG: [AMDGPU] Turn off pass to rewrite partially used virtual superregisters after RenameIndependentSubregs pass with registers of minimal size.

This pass fails in the case when the target register class for a subregister isn't known from the instruction description (e.g. COPY).
Currently in this situation the RC is obtained using TargetRegisterInfo::getSubRegisterClass, but in general this does not work.

In order to fix this two things should be done:
1. Stop processing a subregister if the target register class is unknown (conservative approach)
2. Improve deduction of the subregister's target register class (e.g. by processing the COPY chain)

I was going to implement point 1, but my tests use implicit operands for S_NOP, which don't have an associated target register class, so all tests fail.
Therefore I decided to turn off the pass for now, implement point 1, and fix my tests.

Reviewed By: arsenm, #amdgpu

Differential Revision: https://reviews.llvm.org/D152291

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i128.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
    llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
    llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir
    llvm/test/CodeGen/AMDGPU/dead-lane.mir
    llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
    llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
    llvm/test/CodeGen/AMDGPU/load-global-i16.ll
    llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll
    llvm/test/CodeGen/AMDGPU/mad_64_32.ll
    llvm/test/CodeGen/AMDGPU/mul.ll
    llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
    llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-dbg.mir
    llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-gen.mir
    llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses.mir
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
    llvm/test/CodeGen/AMDGPU/spill-vgpr.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 7cb8e57c0d426..a48e984ae0908 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -343,7 +343,7 @@ static cl::opt<bool> EnableMaxIlpSchedStrategy(
 
 static cl::opt<bool> EnableRewritePartialRegUses(
     "amdgpu-enable-rewrite-partial-reg-uses",
-    cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
+    cl::desc("Enable rewrite partial reg uses pass"), cl::init(false),
     cl::Hidden);
 
 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll
index 790b28bc1b96e..7983cba3478db 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll
@@ -11,13 +11,14 @@ define i32 @v_extract_v64i32_varidx(ptr addrspace(1) %ptr, i32 %idx) {
 ; GCN-NEXT:    s_mov_b32 s4, s33
 ; GCN-NEXT:    s_add_i32 s33, s32, 0x3fc0
 ; GCN-NEXT:    s_and_b32 s33, s33, 0xffffc000
-; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
@@ -26,115 +27,133 @@ define i32 @v_extract_v64i32_varidx(ptr addrspace(1) %ptr, i32 %idx) {
 ; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 ; 4-byte Folded Spill
-; GCN-NEXT:    global_load_dwordx4 v[3:6], v[0:1], off
-; GCN-NEXT:    global_load_dwordx4 v[7:10], v[0:1], off offset:16
-; GCN-NEXT:    global_load_dwordx4 v[11:14], v[0:1], off offset:32
-; GCN-NEXT:    global_load_dwordx4 v[15:18], v[0:1], off offset:48
-; GCN-NEXT:    global_load_dwordx4 v[19:22], v[0:1], off offset:64
-; GCN-NEXT:    global_load_dwordx4 v[23:26], v[0:1], off offset:80
-; GCN-NEXT:    global_load_dwordx4 v[27:30], v[0:1], off offset:96
-; GCN-NEXT:    global_load_dwordx4 v[31:34], v[0:1], off offset:112
-; GCN-NEXT:    global_load_dwordx4 v[35:38], v[0:1], off offset:128
-; GCN-NEXT:    global_load_dwordx4 v[48:51], v[0:1], off offset:144
-; GCN-NEXT:    global_load_dwordx4 v[52:55], v[0:1], off offset:160
-; GCN-NEXT:    global_load_dwordx4 v[39:42], v[0:1], off offset:176
-; GCN-NEXT:    global_load_dwordx4 v[43:46], v[0:1], off offset:192
-; GCN-NEXT:    global_load_dwordx4 v[56:59], v[0:1], off offset:208
-; GCN-NEXT:    global_load_dwordx4 v[60:63], v[0:1], off offset:224
+; GCN-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-NEXT:    global_load_dwordx4 v[2:5], v[0:1], off
+; GCN-NEXT:    global_load_dwordx4 v[16:19], v[0:1], off offset:16
+; GCN-NEXT:    global_load_dwordx4 v[56:59], v[0:1], off offset:32
+; GCN-NEXT:    global_load_dwordx4 v[48:51], v[0:1], off offset:48
+; GCN-NEXT:    global_load_dwordx4 v[20:23], v[0:1], off offset:64
+; GCN-NEXT:    global_load_dwordx4 v[44:47], v[0:1], off offset:80
+; GCN-NEXT:    global_load_dwordx4 v[40:43], v[0:1], off offset:96
+; GCN-NEXT:    global_load_dwordx4 v[60:63], v[0:1], off offset:112
+; GCN-NEXT:    global_load_dwordx4 v[36:39], v[0:1], off offset:128
+; GCN-NEXT:    global_load_dwordx4 v[32:35], v[0:1], off offset:144
+; GCN-NEXT:    global_load_dwordx4 v[28:31], v[0:1], off offset:160
+; GCN-NEXT:    global_load_dwordx4 v[52:55], v[0:1], off offset:176
+; GCN-NEXT:    global_load_dwordx4 v[24:27], v[0:1], off offset:192
+; GCN-NEXT:    global_load_dwordx4 v[7:10], v[0:1], off offset:208
 ; GCN-NEXT:    s_add_i32 s32, s32, 0x10000
 ; GCN-NEXT:    s_add_i32 s32, s32, 0xffff0000
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v60, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill
-; GCN-NEXT:    global_load_dwordx4 v[60:63], v[0:1], off offset:240
-; GCN-NEXT:    v_and_b32_e32 v0, 63, v2
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:544 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:548 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s33 offset:552 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s33 offset:556 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s33 offset:560 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:564 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:568 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:572 ; 4-byte Folded Spill
+; GCN-NEXT:    global_load_dwordx4 v[8:11], v[0:1], off offset:224
+; GCN-NEXT:    global_load_dwordx4 v[12:15], v[0:1], off offset:240
 ; GCN-NEXT:    v_lshrrev_b32_e64 v1, 6, s33
-; GCN-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
 ; GCN-NEXT:    v_add_u32_e32 v1, 0x100, v1
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:256
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:260
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:264
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:268
+; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:272
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:276
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:280
+; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s33 offset:284
+; GCN-NEXT:    buffer_store_dword v56, off, s[0:3], s33 offset:288
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s33 offset:292
+; GCN-NEXT:    buffer_store_dword v58, off, s[0:3], s33 offset:296
+; GCN-NEXT:    buffer_store_dword v59, off, s[0:3], s33 offset:300
+; GCN-NEXT:    buffer_store_dword v48, off, s[0:3], s33 offset:304
+; GCN-NEXT:    buffer_store_dword v49, off, s[0:3], s33 offset:308
+; GCN-NEXT:    buffer_store_dword v50, off, s[0:3], s33 offset:312
+; GCN-NEXT:    buffer_store_dword v51, off, s[0:3], s33 offset:316
+; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s33 offset:320
+; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s33 offset:324
+; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s33 offset:328
+; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s33 offset:332
+; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:336
+; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:340
+; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:344
+; GCN-NEXT:    buffer_store_dword v47, off, s[0:3], s33 offset:348
+; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:352
+; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:356
+; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:360
+; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:364
+; GCN-NEXT:    buffer_store_dword v60, off, s[0:3], s33 offset:368
+; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:372
+; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:376
+; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 offset:380
+; GCN-NEXT:    buffer_store_dword v36, off, s[0:3], s33 offset:384
+; GCN-NEXT:    buffer_store_dword v37, off, s[0:3], s33 offset:388
+; GCN-NEXT:    buffer_store_dword v38, off, s[0:3], s33 offset:392
+; GCN-NEXT:    buffer_store_dword v39, off, s[0:3], s33 offset:396
+; GCN-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:400
+; GCN-NEXT:    buffer_store_dword v33, off, s[0:3], s33 offset:404
+; GCN-NEXT:    buffer_store_dword v34, off, s[0:3], s33 offset:408
+; GCN-NEXT:    buffer_store_dword v35, off, s[0:3], s33 offset:412
+; GCN-NEXT:    buffer_store_dword v28, off, s[0:3], s33 offset:416
+; GCN-NEXT:    buffer_store_dword v29, off, s[0:3], s33 offset:420
+; GCN-NEXT:    buffer_store_dword v30, off, s[0:3], s33 offset:424
+; GCN-NEXT:    buffer_store_dword v31, off, s[0:3], s33 offset:428
+; GCN-NEXT:    buffer_store_dword v52, off, s[0:3], s33 offset:432
+; GCN-NEXT:    buffer_store_dword v53, off, s[0:3], s33 offset:436
+; GCN-NEXT:    buffer_store_dword v54, off, s[0:3], s33 offset:440
+; GCN-NEXT:    buffer_store_dword v55, off, s[0:3], s33 offset:444
+; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s33 offset:448
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s33 offset:452
+; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s33 offset:456
+; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s33 offset:460
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s33 offset:544 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s33 offset:548 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s33 offset:552 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s33 offset:556 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s33 offset:560 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s33 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v30, off, s[0:3], s33 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s33 offset:572 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v0, 63, v6
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
 ; GCN-NEXT:    v_add_u32_e32 v0, v1, v0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v60, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:256
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:260
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:264
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:268
-; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s33 offset:272
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:276
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:280
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:284
-; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:288
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:292
-; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s33 offset:296
-; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s33 offset:300
-; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s33 offset:304
-; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:308
-; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:312
-; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:316
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s33 offset:320
-; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s33 offset:324
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s33 offset:328
-; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s33 offset:332
-; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s33 offset:336
-; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s33 offset:340
-; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s33 offset:344
-; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s33 offset:348
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s33 offset:352
-; GCN-NEXT:    buffer_store_dword v28, off, s[0:3], s33 offset:356
-; GCN-NEXT:    buffer_store_dword v29, off, s[0:3], s33 offset:360
-; GCN-NEXT:    buffer_store_dword v30, off, s[0:3], s33 offset:364
-; GCN-NEXT:    buffer_store_dword v31, off, s[0:3], s33 offset:368
-; GCN-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:372
-; GCN-NEXT:    buffer_store_dword v33, off, s[0:3], s33 offset:376
-; GCN-NEXT:    buffer_store_dword v34, off, s[0:3], s33 offset:380
-; GCN-NEXT:    buffer_store_dword v35, off, s[0:3], s33 offset:384
-; GCN-NEXT:    buffer_store_dword v36, off, s[0:3], s33 offset:388
-; GCN-NEXT:    buffer_store_dword v37, off, s[0:3], s33 offset:392
-; GCN-NEXT:    buffer_store_dword v38, off, s[0:3], s33 offset:396
-; GCN-NEXT:    buffer_store_dword v48, off, s[0:3], s33 offset:400
-; GCN-NEXT:    buffer_store_dword v49, off, s[0:3], s33 offset:404
-; GCN-NEXT:    buffer_store_dword v50, off, s[0:3], s33 offset:408
-; GCN-NEXT:    buffer_store_dword v51, off, s[0:3], s33 offset:412
-; GCN-NEXT:    buffer_store_dword v52, off, s[0:3], s33 offset:416
-; GCN-NEXT:    buffer_store_dword v53, off, s[0:3], s33 offset:420
-; GCN-NEXT:    buffer_store_dword v54, off, s[0:3], s33 offset:424
-; GCN-NEXT:    buffer_store_dword v55, off, s[0:3], s33 offset:428
-; GCN-NEXT:    buffer_store_dword v39, off, s[0:3], s33 offset:432
-; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:436
-; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:440
-; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:444
-; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:448
-; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:452
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:456
-; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:460
-; GCN-NEXT:    buffer_store_dword v56, off, s[0:3], s33 offset:464
-; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s33 offset:468
-; GCN-NEXT:    buffer_store_dword v58, off, s[0:3], s33 offset:472
-; GCN-NEXT:    buffer_store_dword v59, off, s[0:3], s33 offset:476
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:480
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:484
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:488
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:492
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:496
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:500
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:504
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:508
+; GCN-NEXT:    v_mov_b32_e32 v16, v20
+; GCN-NEXT:    v_mov_b32_e32 v17, v21
+; GCN-NEXT:    v_mov_b32_e32 v18, v22
+; GCN-NEXT:    v_mov_b32_e32 v19, v23
+; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:464
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:468
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:472
+; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s33 offset:476
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:480
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:484
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:488
+; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:492
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:496
+; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s33 offset:500
+; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s33 offset:504
+; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s33 offset:508
 ; GCN-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_load_dword v63, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
@@ -144,13 +163,14 @@ define i32 @v_extract_v64i32_varidx(ptr addrspace(1) %ptr, i32 %idx) {
 ; GCN-NEXT:    buffer_load_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_mov_b32 s33, s4
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
@@ -166,13 +186,14 @@ define i16 @v_extract_v128i16_varidx(ptr addrspace(1) %ptr, i32 %idx) {
 ; GCN-NEXT:    s_mov_b32 s4, s33
 ; GCN-NEXT:    s_add_i32 s33, s32, 0x3fc0
 ; GCN-NEXT:    s_and_b32 s33, s33, 0xffffc000
-; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
@@ -181,117 +202,135 @@ define i16 @v_extract_v128i16_varidx(ptr addrspace(1) %ptr, i32 %idx) {
 ; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 ; 4-byte Folded Spill
-; GCN-NEXT:    global_load_dwordx4 v[3:6], v[0:1], off
-; GCN-NEXT:    global_load_dwordx4 v[7:10], v[0:1], off offset:16
-; GCN-NEXT:    global_load_dwordx4 v[11:14], v[0:1], off offset:32
-; GCN-NEXT:    global_load_dwordx4 v[15:18], v[0:1], off offset:48
-; GCN-NEXT:    global_load_dwordx4 v[19:22], v[0:1], off offset:64
-; GCN-NEXT:    global_load_dwordx4 v[23:26], v[0:1], off offset:80
-; GCN-NEXT:    global_load_dwordx4 v[27:30], v[0:1], off offset:96
-; GCN-NEXT:    global_load_dwordx4 v[31:34], v[0:1], off offset:112
-; GCN-NEXT:    global_load_dwordx4 v[35:38], v[0:1], off offset:128
-; GCN-NEXT:    global_load_dwordx4 v[48:51], v[0:1], off offset:144
-; GCN-NEXT:    global_load_dwordx4 v[52:55], v[0:1], off offset:160
-; GCN-NEXT:    global_load_dwordx4 v[39:42], v[0:1], off offset:176
-; GCN-NEXT:    global_load_dwordx4 v[43:46], v[0:1], off offset:192
-; GCN-NEXT:    global_load_dwordx4 v[56:59], v[0:1], off offset:208
-; GCN-NEXT:    global_load_dwordx4 v[60:63], v[0:1], off offset:224
+; GCN-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-NEXT:    global_load_dwordx4 v[2:5], v[0:1], off
+; GCN-NEXT:    global_load_dwordx4 v[16:19], v[0:1], off offset:16
+; GCN-NEXT:    global_load_dwordx4 v[56:59], v[0:1], off offset:32
+; GCN-NEXT:    global_load_dwordx4 v[48:51], v[0:1], off offset:48
+; GCN-NEXT:    global_load_dwordx4 v[20:23], v[0:1], off offset:64
+; GCN-NEXT:    global_load_dwordx4 v[44:47], v[0:1], off offset:80
+; GCN-NEXT:    global_load_dwordx4 v[40:43], v[0:1], off offset:96
+; GCN-NEXT:    global_load_dwordx4 v[60:63], v[0:1], off offset:112
+; GCN-NEXT:    global_load_dwordx4 v[36:39], v[0:1], off offset:128
+; GCN-NEXT:    global_load_dwordx4 v[32:35], v[0:1], off offset:144
+; GCN-NEXT:    global_load_dwordx4 v[28:31], v[0:1], off offset:160
+; GCN-NEXT:    global_load_dwordx4 v[52:55], v[0:1], off offset:176
+; GCN-NEXT:    global_load_dwordx4 v[24:27], v[0:1], off offset:192
+; GCN-NEXT:    global_load_dwordx4 v[7:10], v[0:1], off offset:208
 ; GCN-NEXT:    s_add_i32 s32, s32, 0x10000
 ; GCN-NEXT:    s_add_i32 s32, s32, 0xffff0000
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v60, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill
-; GCN-NEXT:    global_load_dwordx4 v[60:63], v[0:1], off offset:240
-; GCN-NEXT:    v_bfe_u32 v0, v2, 1, 6
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:544 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:548 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s33 offset:552 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s33 offset:556 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s33 offset:560 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:564 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:568 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:572 ; 4-byte Folded Spill
+; GCN-NEXT:    global_load_dwordx4 v[8:11], v[0:1], off offset:224
+; GCN-NEXT:    global_load_dwordx4 v[12:15], v[0:1], off offset:240
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:256
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:260
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:264
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:268
+; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:272
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:276
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:280
+; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s33 offset:284
+; GCN-NEXT:    buffer_store_dword v56, off, s[0:3], s33 offset:288
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s33 offset:292
+; GCN-NEXT:    buffer_store_dword v58, off, s[0:3], s33 offset:296
+; GCN-NEXT:    buffer_store_dword v59, off, s[0:3], s33 offset:300
+; GCN-NEXT:    buffer_store_dword v48, off, s[0:3], s33 offset:304
+; GCN-NEXT:    buffer_store_dword v49, off, s[0:3], s33 offset:308
+; GCN-NEXT:    buffer_store_dword v50, off, s[0:3], s33 offset:312
+; GCN-NEXT:    buffer_store_dword v51, off, s[0:3], s33 offset:316
+; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s33 offset:320
+; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s33 offset:324
+; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s33 offset:328
+; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s33 offset:332
+; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:336
+; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:340
+; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:344
+; GCN-NEXT:    buffer_store_dword v47, off, s[0:3], s33 offset:348
+; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:352
+; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:356
+; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:360
+; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:364
+; GCN-NEXT:    buffer_store_dword v60, off, s[0:3], s33 offset:368
+; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:372
+; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:376
+; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 offset:380
+; GCN-NEXT:    buffer_store_dword v36, off, s[0:3], s33 offset:384
+; GCN-NEXT:    buffer_store_dword v37, off, s[0:3], s33 offset:388
+; GCN-NEXT:    buffer_store_dword v38, off, s[0:3], s33 offset:392
+; GCN-NEXT:    buffer_store_dword v39, off, s[0:3], s33 offset:396
+; GCN-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:400
+; GCN-NEXT:    buffer_store_dword v33, off, s[0:3], s33 offset:404
+; GCN-NEXT:    buffer_store_dword v34, off, s[0:3], s33 offset:408
+; GCN-NEXT:    buffer_store_dword v35, off, s[0:3], s33 offset:412
+; GCN-NEXT:    buffer_store_dword v28, off, s[0:3], s33 offset:416
+; GCN-NEXT:    buffer_store_dword v29, off, s[0:3], s33 offset:420
+; GCN-NEXT:    buffer_store_dword v30, off, s[0:3], s33 offset:424
+; GCN-NEXT:    buffer_store_dword v31, off, s[0:3], s33 offset:428
+; GCN-NEXT:    buffer_store_dword v52, off, s[0:3], s33 offset:432
+; GCN-NEXT:    buffer_store_dword v53, off, s[0:3], s33 offset:436
+; GCN-NEXT:    buffer_store_dword v54, off, s[0:3], s33 offset:440
+; GCN-NEXT:    buffer_store_dword v55, off, s[0:3], s33 offset:444
+; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s33 offset:448
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s33 offset:452
+; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s33 offset:456
+; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s33 offset:460
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s33 offset:544 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s33 offset:548 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s33 offset:552 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s33 offset:556 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s33 offset:560 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s33 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v30, off, s[0:3], s33 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s33 offset:572 ; 4-byte Folded Reload
+; GCN-NEXT:    v_bfe_u32 v0, v6, 1, 6
+; GCN-NEXT:    v_lshrrev_b32_e64 v2, 6, s33
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT:    v_and_b32_e32 v1, 1, v2
+; GCN-NEXT:    v_add_u32_e32 v2, 0x100, v2
+; GCN-NEXT:    v_add_u32_e32 v0, v2, v0
+; GCN-NEXT:    v_and_b32_e32 v1, 1, v6
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 4, v1
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v60, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:256
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:260
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:264
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:268
-; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s33 offset:272
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:276
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:280
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:284
-; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:288
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:292
-; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s33 offset:296
-; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s33 offset:300
-; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s33 offset:304
-; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:308
-; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:312
-; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:316
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s33 offset:320
-; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s33 offset:324
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s33 offset:328
-; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s33 offset:332
-; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s33 offset:336
-; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s33 offset:340
-; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s33 offset:344
-; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s33 offset:348
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s33 offset:352
-; GCN-NEXT:    buffer_store_dword v28, off, s[0:3], s33 offset:356
-; GCN-NEXT:    buffer_store_dword v29, off, s[0:3], s33 offset:360
-; GCN-NEXT:    buffer_store_dword v30, off, s[0:3], s33 offset:364
-; GCN-NEXT:    buffer_store_dword v31, off, s[0:3], s33 offset:368
-; GCN-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:372
-; GCN-NEXT:    buffer_store_dword v33, off, s[0:3], s33 offset:376
-; GCN-NEXT:    buffer_store_dword v34, off, s[0:3], s33 offset:380
-; GCN-NEXT:    buffer_store_dword v35, off, s[0:3], s33 offset:384
-; GCN-NEXT:    buffer_store_dword v36, off, s[0:3], s33 offset:388
-; GCN-NEXT:    buffer_store_dword v37, off, s[0:3], s33 offset:392
-; GCN-NEXT:    buffer_store_dword v38, off, s[0:3], s33 offset:396
-; GCN-NEXT:    buffer_store_dword v48, off, s[0:3], s33 offset:400
-; GCN-NEXT:    buffer_store_dword v49, off, s[0:3], s33 offset:404
-; GCN-NEXT:    buffer_store_dword v50, off, s[0:3], s33 offset:408
-; GCN-NEXT:    buffer_store_dword v51, off, s[0:3], s33 offset:412
-; GCN-NEXT:    buffer_store_dword v52, off, s[0:3], s33 offset:416
-; GCN-NEXT:    buffer_store_dword v53, off, s[0:3], s33 offset:420
-; GCN-NEXT:    buffer_store_dword v54, off, s[0:3], s33 offset:424
-; GCN-NEXT:    buffer_store_dword v55, off, s[0:3], s33 offset:428
-; GCN-NEXT:    buffer_store_dword v39, off, s[0:3], s33 offset:432
-; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:436
-; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:440
-; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:444
-; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:448
-; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:452
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:456
-; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:460
-; GCN-NEXT:    buffer_store_dword v56, off, s[0:3], s33 offset:464
-; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s33 offset:468
-; GCN-NEXT:    buffer_store_dword v58, off, s[0:3], s33 offset:472
-; GCN-NEXT:    buffer_store_dword v59, off, s[0:3], s33 offset:476
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:480
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:484
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:488
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:492
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:496
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:500
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:504
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:508
-; GCN-NEXT:    v_lshrrev_b32_e64 v3, 6, s33
-; GCN-NEXT:    v_add_u32_e32 v3, 0x100, v3
-; GCN-NEXT:    v_add_u32_e32 v0, v3, v0
+; GCN-NEXT:    v_mov_b32_e32 v16, v20
+; GCN-NEXT:    v_mov_b32_e32 v17, v21
+; GCN-NEXT:    v_mov_b32_e32 v18, v22
+; GCN-NEXT:    v_mov_b32_e32 v19, v23
+; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:464
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:468
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:472
+; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s33 offset:476
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:480
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:484
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:488
+; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:492
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:496
+; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s33 offset:500
+; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s33 offset:504
+; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s33 offset:508
 ; GCN-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_load_dword v63, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
@@ -301,15 +340,16 @@ define i16 @v_extract_v128i16_varidx(ptr addrspace(1) %ptr, i32 %idx) {
 ; GCN-NEXT:    buffer_load_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_mov_b32 s33, s4
-; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    s_waitcnt vmcnt(16)
 ; GCN-NEXT:    v_lshrrev_b32_e32 v0, v1, v0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
@@ -325,13 +365,14 @@ define i64 @v_extract_v32i64_varidx(ptr addrspace(1) %ptr, i32 %idx) {
 ; GCN-NEXT:    s_mov_b32 s4, s33
 ; GCN-NEXT:    s_add_i32 s33, s32, 0x3fc0
 ; GCN-NEXT:    s_and_b32 s33, s33, 0xffffc000
-; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
@@ -340,115 +381,133 @@ define i64 @v_extract_v32i64_varidx(ptr addrspace(1) %ptr, i32 %idx) {
 ; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 ; 4-byte Folded Spill
-; GCN-NEXT:    global_load_dwordx4 v[3:6], v[0:1], off
-; GCN-NEXT:    global_load_dwordx4 v[7:10], v[0:1], off offset:16
-; GCN-NEXT:    global_load_dwordx4 v[11:14], v[0:1], off offset:32
-; GCN-NEXT:    global_load_dwordx4 v[15:18], v[0:1], off offset:48
-; GCN-NEXT:    global_load_dwordx4 v[19:22], v[0:1], off offset:64
-; GCN-NEXT:    global_load_dwordx4 v[23:26], v[0:1], off offset:80
-; GCN-NEXT:    global_load_dwordx4 v[27:30], v[0:1], off offset:96
-; GCN-NEXT:    global_load_dwordx4 v[31:34], v[0:1], off offset:112
-; GCN-NEXT:    global_load_dwordx4 v[35:38], v[0:1], off offset:128
-; GCN-NEXT:    global_load_dwordx4 v[48:51], v[0:1], off offset:144
-; GCN-NEXT:    global_load_dwordx4 v[52:55], v[0:1], off offset:160
-; GCN-NEXT:    global_load_dwordx4 v[39:42], v[0:1], off offset:176
-; GCN-NEXT:    global_load_dwordx4 v[43:46], v[0:1], off offset:192
-; GCN-NEXT:    global_load_dwordx4 v[56:59], v[0:1], off offset:208
-; GCN-NEXT:    global_load_dwordx4 v[60:63], v[0:1], off offset:224
+; GCN-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-NEXT:    global_load_dwordx4 v[2:5], v[0:1], off
+; GCN-NEXT:    global_load_dwordx4 v[16:19], v[0:1], off offset:16
+; GCN-NEXT:    global_load_dwordx4 v[56:59], v[0:1], off offset:32
+; GCN-NEXT:    global_load_dwordx4 v[48:51], v[0:1], off offset:48
+; GCN-NEXT:    global_load_dwordx4 v[20:23], v[0:1], off offset:64
+; GCN-NEXT:    global_load_dwordx4 v[44:47], v[0:1], off offset:80
+; GCN-NEXT:    global_load_dwordx4 v[40:43], v[0:1], off offset:96
+; GCN-NEXT:    global_load_dwordx4 v[60:63], v[0:1], off offset:112
+; GCN-NEXT:    global_load_dwordx4 v[36:39], v[0:1], off offset:128
+; GCN-NEXT:    global_load_dwordx4 v[32:35], v[0:1], off offset:144
+; GCN-NEXT:    global_load_dwordx4 v[28:31], v[0:1], off offset:160
+; GCN-NEXT:    global_load_dwordx4 v[52:55], v[0:1], off offset:176
+; GCN-NEXT:    global_load_dwordx4 v[24:27], v[0:1], off offset:192
+; GCN-NEXT:    global_load_dwordx4 v[7:10], v[0:1], off offset:208
 ; GCN-NEXT:    s_add_i32 s32, s32, 0x10000
 ; GCN-NEXT:    s_add_i32 s32, s32, 0xffff0000
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v60, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill
-; GCN-NEXT:    global_load_dwordx4 v[60:63], v[0:1], off offset:240
-; GCN-NEXT:    v_and_b32_e32 v0, 31, v2
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:544 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:548 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s33 offset:552 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s33 offset:556 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s33 offset:560 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:564 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:568 ; 4-byte Folded Spill
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:572 ; 4-byte Folded Spill
+; GCN-NEXT:    global_load_dwordx4 v[8:11], v[0:1], off offset:224
+; GCN-NEXT:    global_load_dwordx4 v[12:15], v[0:1], off offset:240
+; GCN-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:256
+; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:260
+; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:264
+; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:268
+; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:272
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:276
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:280
+; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s33 offset:284
+; GCN-NEXT:    buffer_store_dword v56, off, s[0:3], s33 offset:288
+; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s33 offset:292
+; GCN-NEXT:    buffer_store_dword v58, off, s[0:3], s33 offset:296
+; GCN-NEXT:    buffer_store_dword v59, off, s[0:3], s33 offset:300
+; GCN-NEXT:    buffer_store_dword v48, off, s[0:3], s33 offset:304
+; GCN-NEXT:    buffer_store_dword v49, off, s[0:3], s33 offset:308
+; GCN-NEXT:    buffer_store_dword v50, off, s[0:3], s33 offset:312
+; GCN-NEXT:    buffer_store_dword v51, off, s[0:3], s33 offset:316
+; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s33 offset:320
+; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s33 offset:324
+; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s33 offset:328
+; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s33 offset:332
+; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:336
+; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:340
+; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:344
+; GCN-NEXT:    buffer_store_dword v47, off, s[0:3], s33 offset:348
+; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:352
+; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:356
+; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:360
+; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:364
+; GCN-NEXT:    buffer_store_dword v60, off, s[0:3], s33 offset:368
+; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:372
+; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:376
+; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 offset:380
+; GCN-NEXT:    buffer_store_dword v36, off, s[0:3], s33 offset:384
+; GCN-NEXT:    buffer_store_dword v37, off, s[0:3], s33 offset:388
+; GCN-NEXT:    buffer_store_dword v38, off, s[0:3], s33 offset:392
+; GCN-NEXT:    buffer_store_dword v39, off, s[0:3], s33 offset:396
+; GCN-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:400
+; GCN-NEXT:    buffer_store_dword v33, off, s[0:3], s33 offset:404
+; GCN-NEXT:    buffer_store_dword v34, off, s[0:3], s33 offset:408
+; GCN-NEXT:    buffer_store_dword v35, off, s[0:3], s33 offset:412
+; GCN-NEXT:    buffer_store_dword v28, off, s[0:3], s33 offset:416
+; GCN-NEXT:    buffer_store_dword v29, off, s[0:3], s33 offset:420
+; GCN-NEXT:    buffer_store_dword v30, off, s[0:3], s33 offset:424
+; GCN-NEXT:    buffer_store_dword v31, off, s[0:3], s33 offset:428
+; GCN-NEXT:    buffer_store_dword v52, off, s[0:3], s33 offset:432
+; GCN-NEXT:    buffer_store_dword v53, off, s[0:3], s33 offset:436
+; GCN-NEXT:    buffer_store_dword v54, off, s[0:3], s33 offset:440
+; GCN-NEXT:    buffer_store_dword v55, off, s[0:3], s33 offset:444
+; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s33 offset:448
+; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s33 offset:452
+; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s33 offset:456
+; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s33 offset:460
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v17, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v18, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v19, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v20, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v21, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v22, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v23, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v24, off, s[0:3], s33 offset:544 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v25, off, s[0:3], s33 offset:548 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v26, off, s[0:3], s33 offset:552 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v27, off, s[0:3], s33 offset:556 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v28, off, s[0:3], s33 offset:560 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v29, off, s[0:3], s33 offset:564 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v30, off, s[0:3], s33 offset:568 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v31, off, s[0:3], s33 offset:572 ; 4-byte Folded Reload
+; GCN-NEXT:    v_and_b32_e32 v0, 31, v6
 ; GCN-NEXT:    v_lshrrev_b32_e64 v2, 6, s33
 ; GCN-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
 ; GCN-NEXT:    v_add_u32_e32 v2, 0x100, v2
 ; GCN-NEXT:    v_add_u32_e32 v1, v2, v0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v60, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v61, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v62, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v63, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:256
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:260
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:264
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:268
-; GCN-NEXT:    buffer_store_dword v7, off, s[0:3], s33 offset:272
-; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:276
-; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:280
-; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:284
-; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:288
-; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:292
-; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s33 offset:296
-; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s33 offset:300
-; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s33 offset:304
-; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:308
-; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:312
-; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:316
-; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s33 offset:320
-; GCN-NEXT:    buffer_store_dword v20, off, s[0:3], s33 offset:324
-; GCN-NEXT:    buffer_store_dword v21, off, s[0:3], s33 offset:328
-; GCN-NEXT:    buffer_store_dword v22, off, s[0:3], s33 offset:332
-; GCN-NEXT:    buffer_store_dword v23, off, s[0:3], s33 offset:336
-; GCN-NEXT:    buffer_store_dword v24, off, s[0:3], s33 offset:340
-; GCN-NEXT:    buffer_store_dword v25, off, s[0:3], s33 offset:344
-; GCN-NEXT:    buffer_store_dword v26, off, s[0:3], s33 offset:348
-; GCN-NEXT:    buffer_store_dword v27, off, s[0:3], s33 offset:352
-; GCN-NEXT:    buffer_store_dword v28, off, s[0:3], s33 offset:356
-; GCN-NEXT:    buffer_store_dword v29, off, s[0:3], s33 offset:360
-; GCN-NEXT:    buffer_store_dword v30, off, s[0:3], s33 offset:364
-; GCN-NEXT:    buffer_store_dword v31, off, s[0:3], s33 offset:368
-; GCN-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:372
-; GCN-NEXT:    buffer_store_dword v33, off, s[0:3], s33 offset:376
-; GCN-NEXT:    buffer_store_dword v34, off, s[0:3], s33 offset:380
-; GCN-NEXT:    buffer_store_dword v35, off, s[0:3], s33 offset:384
-; GCN-NEXT:    buffer_store_dword v36, off, s[0:3], s33 offset:388
-; GCN-NEXT:    buffer_store_dword v37, off, s[0:3], s33 offset:392
-; GCN-NEXT:    buffer_store_dword v38, off, s[0:3], s33 offset:396
-; GCN-NEXT:    buffer_store_dword v48, off, s[0:3], s33 offset:400
-; GCN-NEXT:    buffer_store_dword v49, off, s[0:3], s33 offset:404
-; GCN-NEXT:    buffer_store_dword v50, off, s[0:3], s33 offset:408
-; GCN-NEXT:    buffer_store_dword v51, off, s[0:3], s33 offset:412
-; GCN-NEXT:    buffer_store_dword v52, off, s[0:3], s33 offset:416
-; GCN-NEXT:    buffer_store_dword v53, off, s[0:3], s33 offset:420
-; GCN-NEXT:    buffer_store_dword v54, off, s[0:3], s33 offset:424
-; GCN-NEXT:    buffer_store_dword v55, off, s[0:3], s33 offset:428
-; GCN-NEXT:    buffer_store_dword v39, off, s[0:3], s33 offset:432
-; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s33 offset:436
-; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:440
-; GCN-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:444
-; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:448
-; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:452
-; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 offset:456
-; GCN-NEXT:    buffer_store_dword v46, off, s[0:3], s33 offset:460
-; GCN-NEXT:    buffer_store_dword v56, off, s[0:3], s33 offset:464
-; GCN-NEXT:    buffer_store_dword v57, off, s[0:3], s33 offset:468
-; GCN-NEXT:    buffer_store_dword v58, off, s[0:3], s33 offset:472
-; GCN-NEXT:    buffer_store_dword v59, off, s[0:3], s33 offset:476
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:480
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:484
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:488
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:492
-; GCN-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:496
-; GCN-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:500
-; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:504
-; GCN-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:508
+; GCN-NEXT:    v_mov_b32_e32 v16, v20
+; GCN-NEXT:    v_mov_b32_e32 v17, v21
+; GCN-NEXT:    v_mov_b32_e32 v18, v22
+; GCN-NEXT:    v_mov_b32_e32 v19, v23
+; GCN-NEXT:    buffer_store_dword v16, off, s[0:3], s33 offset:464
+; GCN-NEXT:    buffer_store_dword v17, off, s[0:3], s33 offset:468
+; GCN-NEXT:    buffer_store_dword v18, off, s[0:3], s33 offset:472
+; GCN-NEXT:    buffer_store_dword v19, off, s[0:3], s33 offset:476
+; GCN-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:480
+; GCN-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:484
+; GCN-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:488
+; GCN-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:492
+; GCN-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:496
+; GCN-NEXT:    buffer_store_dword v13, off, s[0:3], s33 offset:500
+; GCN-NEXT:    buffer_store_dword v14, off, s[0:3], s33 offset:504
+; GCN-NEXT:    buffer_store_dword v15, off, s[0:3], s33 offset:508
 ; GCN-NEXT:    buffer_load_dword v0, v1, s[0:3], 0 offen
 ; GCN-NEXT:    buffer_load_dword v1, v1, s[0:3], 0 offen offset:4
 ; GCN-NEXT:    buffer_load_dword v63, off, s[0:3], s33 ; 4-byte Folded Reload
@@ -459,13 +518,14 @@ define i64 @v_extract_v32i64_varidx(ptr addrspace(1) %ptr, i32 %idx) {
 ; GCN-NEXT:    buffer_load_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Reload
 ; GCN-NEXT:    buffer_load_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload
-; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload
+; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_mov_b32 s33, s4
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i128.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i128.ll
index 4535db66a9e8f..8b796bfde046c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i128.ll
@@ -50,17 +50,15 @@ define amdgpu_ps i128 @extractelement_vgpr_v4i128_sgpr_idx(ptr addrspace(1) %ptr
 ; GFX9-NEXT:    s_set_gpr_idx_on s2, gpr_idx(SRC0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, v2
 ; GFX9-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-NEXT:    v_mov_b32_e32 v18, v2
 ; GFX9-NEXT:    s_set_gpr_idx_off
 ; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX9-NEXT:    s_set_gpr_idx_on s2, gpr_idx(SRC0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, v2
-; GFX9-NEXT:    s_set_gpr_idx_off
 ; GFX9-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX9-NEXT:    s_set_gpr_idx_on s2, gpr_idx(SRC0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-NEXT:    v_mov_b32_e32 v3, v3
 ; GFX9-NEXT:    s_set_gpr_idx_off
-; GFX9-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX9-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s2, v18
+; GFX9-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX9-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-LABEL: extractelement_vgpr_v4i128_sgpr_idx:
@@ -162,56 +160,56 @@ define i128 @extractelement_vgpr_v4i128_vgpr_idx(ptr addrspace(1) %ptr, i32 %idx
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    global_load_dwordx4 v[3:6], v[0:1], off
 ; GFX9-NEXT:    global_load_dwordx4 v[7:10], v[0:1], off offset:16
-; GFX9-NEXT:    global_load_dwordx4 v[11:14], v[0:1], off offset:32
-; GFX9-NEXT:    global_load_dwordx4 v[15:18], v[0:1], off offset:48
-; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 1, v2
-; GFX9-NEXT:    v_add_u32_e32 v2, 1, v0
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT:    s_waitcnt vmcnt(3)
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v19, v4, v6, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX9-NEXT:    v_lshlrev_b32_e32 v2, 1, v2
+; GFX9-NEXT:    v_add_u32_e32 v16, 1, v2
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v16
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    v_cndmask_b32_e64 v11, v3, v5, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v12, v4, v6, s[4:5]
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GFX9-NEXT:    s_waitcnt vmcnt(2)
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v19, v8, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v2
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v11, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v6, v12, v8, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v16
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v10, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v2
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v16
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; GFX9-NEXT:    s_waitcnt vmcnt(1)
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v12, vcc
+; GFX9-NEXT:    global_load_dwordx4 v[8:11], v[0:1], off offset:32
+; GFX9-NEXT:    global_load_dwordx4 v[12:15], v[0:1], off offset:48
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v2
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v12, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v14, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v8, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v6, v9, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v16
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v8, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v9, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v2
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v13, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v14, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v15, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v16, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v16
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v10, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v11, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v2
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v15, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v1, v17, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v5, v18, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v12, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v16
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v12, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v13, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v2
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v17, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v18, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v14, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v15, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v16
+; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v14, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v15, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: extractelement_vgpr_v4i128_vgpr_idx:
@@ -221,179 +219,179 @@ define i128 @extractelement_vgpr_v4i128_vgpr_idx(ptr addrspace(1) %ptr, i32 %idx
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, 16, v0
 ; GFX8-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
 ; GFX8-NEXT:    flat_load_dwordx4 v[7:10], v[7:8]
-; GFX8-NEXT:    v_add_u32_e32 v11, vcc, 32, v0
-; GFX8-NEXT:    v_addc_u32_e32 v12, vcc, 0, v1, vcc
-; GFX8-NEXT:    flat_load_dwordx4 v[11:14], v[11:12]
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 48, v0
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT:    flat_load_dwordx4 v[15:18], v[0:1]
-; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 1, v2
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 1, v0
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT:    s_waitcnt vmcnt(3)
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v19, v4, v6, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX8-NEXT:    v_lshlrev_b32_e32 v16, 1, v2
+; GFX8-NEXT:    v_add_u32_e32 v17, vcc, 1, v16
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v17
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v16
+; GFX8-NEXT:    s_waitcnt vmcnt(1)
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v3, v5, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v12, v4, v6, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, v3, v5, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GFX8-NEXT:    s_waitcnt vmcnt(2)
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, v19, v8, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 32, v0
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v16
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_cndmask_b32_e32 v6, v11, v7, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v11, v12, v8, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v17
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, v5, v10, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v16
+; GFX8-NEXT:    v_cndmask_b32_e32 v6, v6, v9, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v7, v11, v10, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v17
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
+; GFX8-NEXT:    flat_load_dwordx4 v[8:11], v[2:3]
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 48, v0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    flat_load_dwordx4 v[12:15], v[0:1]
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v16
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v6, v8, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v7, v9, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v17
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, v5, v8, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v9, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v16
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, v5, v12, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v2
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v17
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v4, v4, v12, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v16
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v12, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, v5, v14, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v2
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v17
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v12, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v13, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v4, v4, v14, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v16
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v14, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v15, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, v5, v16, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v2
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v17
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v15, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v17, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v5, v18, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v17, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v18, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX7-LABEL: extractelement_vgpr_v4i128_vgpr_idx:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT:    s_mov_b32 s6, 0
-; GFX7-NEXT:    s_mov_b32 s7, 0xf000
-; GFX7-NEXT:    s_mov_b64 s[4:5], 0
-; GFX7-NEXT:    buffer_load_dwordx4 v[3:6], v[0:1], s[4:7], 0 addr64
-; GFX7-NEXT:    buffer_load_dwordx4 v[7:10], v[0:1], s[4:7], 0 addr64 offset:16
-; GFX7-NEXT:    buffer_load_dwordx4 v[11:14], v[0:1], s[4:7], 0 addr64 offset:32
-; GFX7-NEXT:    buffer_load_dwordx4 v[15:18], v[0:1], s[4:7], 0 addr64 offset:48
-; GFX7-NEXT:    v_lshlrev_b32_e32 v0, 1, v2
-; GFX7-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT:    s_waitcnt vmcnt(3)
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v19, v4, v6, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7-NEXT:    s_mov_b32 s10, 0
+; GFX7-NEXT:    s_mov_b32 s11, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[8:9], 0
+; GFX7-NEXT:    buffer_load_dwordx4 v[3:6], v[0:1], s[8:11], 0 addr64
+; GFX7-NEXT:    buffer_load_dwordx4 v[7:10], v[0:1], s[8:11], 0 addr64 offset:16
+; GFX7-NEXT:    v_lshlrev_b32_e32 v2, 1, v2
+; GFX7-NEXT:    v_add_i32_e32 v16, vcc, 1, v2
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v16
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v2
+; GFX7-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-NEXT:    v_cndmask_b32_e64 v11, v3, v5, s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v12, v4, v6, s[4:5]
 ; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GFX7-NEXT:    s_waitcnt vmcnt(2)
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v5, v19, v8, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v2
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    v_cndmask_b32_e32 v5, v11, v7, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v6, v12, v8, vcc
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v16
 ; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v5, v5, v10, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v2
+; GFX7-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v16
 ; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; GFX7-NEXT:    s_waitcnt vmcnt(1)
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v5, v5, v12, vcc
+; GFX7-NEXT:    buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:32
+; GFX7-NEXT:    buffer_load_dwordx4 v[12:15], v[0:1], s[8:11], 0 addr64 offset:48
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v2
-; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v12, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v5, v5, v14, vcc
+; GFX7-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v5, v8, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v6, v9, vcc
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v16
+; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v8, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v9, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v2
-; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v13, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v14, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v15, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v5, v5, v16, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v16
+; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v10, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v11, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v2
-; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v15, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v0, v1, v17, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v5, v18, vcc
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v0, v12, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v16
+; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v12, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v4, v4, v13, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v2
-; GFX7-NEXT:    v_cndmask_b32_e32 v2, v3, v17, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v3, v4, v18, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v0, v14, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v15, vcc
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v16
+; GFX7-NEXT:    v_cndmask_b32_e32 v2, v3, v14, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v3, v4, v15, vcc
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: extractelement_vgpr_v4i128_vgpr_idx:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    s_clause 0x3
-; GFX10-NEXT:    global_load_dwordx4 v[3:6], v[0:1], off
-; GFX10-NEXT:    global_load_dwordx4 v[7:10], v[0:1], off offset:16
-; GFX10-NEXT:    global_load_dwordx4 v[11:14], v[0:1], off offset:32
-; GFX10-NEXT:    global_load_dwordx4 v[15:18], v[0:1], off offset:48
-; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 1, v2
-; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v0
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 1, v1
-; GFX10-NEXT:    s_waitcnt vmcnt(3)
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v19, v4, v6, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 2, v1
+; GFX10-NEXT:    s_clause 0x1
+; GFX10-NEXT:    global_load_dwordx4 v[12:15], v[0:1], off
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v[0:1], off offset:16
+; GFX10-NEXT:    v_lshlrev_b32_e32 v2, 1, v2
+; GFX10-NEXT:    global_load_dwordx4 v[8:11], v[0:1], off offset:32
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, 1, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 1, v3
 ; GFX10-NEXT:    s_waitcnt vmcnt(2)
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v7, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v19, v8, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s4
+; GFX10-NEXT:    v_cndmask_b32_e32 v16, v12, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v17, v13, v15, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v12, v14, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v19, v13, v15, s4
+; GFX10-NEXT:    global_load_dwordx4 v[12:15], v[0:1], off offset:48
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 2, v3
+; GFX10-NEXT:    s_waitcnt vmcnt(2)
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v16, v4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v17, v5, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v18, v4, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v19, v5, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 3, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v7, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 4, v3
+; GFX10-NEXT:    s_waitcnt vmcnt(1)
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v8, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v2
 ; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v8, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 3, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v10, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v9, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v9, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 5, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v2
 ; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 4, v1
-; GFX10-NEXT:    s_waitcnt vmcnt(1)
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v11, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v12, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v12, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 5, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v13, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v14, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v13, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v14, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 6, v1
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 6, v3
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v15, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v16, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v15, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 7, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v2, v17, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v5, v18, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v3, v17, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v4, v18, s4
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v12, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v12, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v13, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 7, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v15, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v4, v14, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v5, v15, s4
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: extractelement_vgpr_v4i128_vgpr_idx:
@@ -401,58 +399,59 @@ define i128 @extractelement_vgpr_v4i128_vgpr_idx(ptr addrspace(1) %ptr, i32 %idx
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    s_clause 0x3
-; GFX11-NEXT:    global_load_b128 v[3:6], v[0:1], off
-; GFX11-NEXT:    global_load_b128 v[7:10], v[0:1], off offset:16
-; GFX11-NEXT:    global_load_b128 v[11:14], v[0:1], off offset:32
-; GFX11-NEXT:    global_load_b128 v[15:18], v[0:1], off offset:48
+; GFX11-NEXT:    global_load_b128 v[16:19], v[0:1], off
+; GFX11-NEXT:    global_load_b128 v[4:7], v[0:1], off offset:16
+; GFX11-NEXT:    global_load_b128 v[8:11], v[0:1], off offset:32
+; GFX11-NEXT:    global_load_b128 v[12:15], v[0:1], off offset:48
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 1, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX11-NEXT:    s_waitcnt vmcnt(3)
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v3, v5 :: v_dual_add_nc_u32 v1, 1, v0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v1
-; GFX11-NEXT:    v_cndmask_b32_e32 v19, v4, v6, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v3, v17, v19, vcc_lo
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v16, v18 :: v_dual_add_nc_u32 v1, 1, v0
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v1
 ; GFX11-NEXT:    s_waitcnt vmcnt(2)
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, v2, v7, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 2, v1
-; GFX11-NEXT:    v_cndmask_b32_e32 v5, v19, v8, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v4 :: v_dual_cndmask_b32 v3, v3, v5
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v8, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v1
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v9 :: v_dual_cndmask_b32 v5, v5, v10
+; GFX11-NEXT:    v_cndmask_b32_e64 v16, v16, v18, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v17, v17, v19, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 2, v1
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v6 :: v_dual_cndmask_b32 v3, v3, v7
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v9, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 4, v1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v16, v4, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v17, v5, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v1
 ; GFX11-NEXT:    s_waitcnt vmcnt(1)
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v11 :: v_dual_cndmask_b32 v5, v5, v12
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v8 :: v_dual_cndmask_b32 v3, v3, v9
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v12, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v1
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v13 :: v_dual_cndmask_b32 v5, v5, v14
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v7, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 4, v1
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v10 :: v_dual_cndmask_b32 v3, v3, v11
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v13, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v14, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 6, v1
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v8, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v9, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v1
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v15 :: v_dual_cndmask_b32 v5, v5, v16
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v12 :: v_dual_cndmask_b32 v3, v3, v13
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v15, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 6, v1
+; GFX11-NEXT:    v_cndmask_b32_e32 v0, v2, v14, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v12, s0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v13, s0
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 7, v1
-; GFX11-NEXT:    v_dual_cndmask_b32 v0, v2, v17 :: v_dual_cndmask_b32 v1, v5, v18
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v3, v17, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, v3, v15, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v4, v14, s0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v4, v18, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v5, v15, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %vector = load <4 x i128>, ptr addrspace(1) %ptr
   %element = extractelement <4 x i128> %vector, i32 %idx
@@ -465,6 +464,7 @@ define amdgpu_ps i128 @extractelement_sgpr_v4i128_vgpr_idx(ptr addrspace(4) inre
 ; GFX9-NEXT:    s_load_dwordx16 s[0:15], s[2:3], 0x0
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT:    v_add_u32_e32 v19, 1, v0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s1
@@ -495,39 +495,38 @@ define amdgpu_ps i128 @extractelement_sgpr_v4i128_vgpr_idx(ptr addrspace(4) inre
 ; GFX9-NEXT:    v_cndmask_b32_e32 v17, v17, v11, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v18, v18, v12, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GFX9-NEXT:    v_mov_b32_e32 v15, s14
-; GFX9-NEXT:    v_mov_b32_e32 v16, s15
 ; GFX9-NEXT:    v_cndmask_b32_e32 v17, v17, v13, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v18, v18, v14, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GFX9-NEXT:    v_add_u32_e32 v0, 1, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v17, v17, v15, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v18, v18, v16, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v19
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], 2, v19
+; GFX9-NEXT:    v_mov_b32_e32 v15, s14
+; GFX9-NEXT:    v_mov_b32_e32 v16, s15
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s[0:1]
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], 3, v19
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v17, v15, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v1, v7, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v18, v16, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v8, s[0:1]
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v19
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v19
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v12, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v1, v15, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v16, vcc
-; GFX9-NEXT:    v_readfirstlane_b32 s0, v17
-; GFX9-NEXT:    v_readfirstlane_b32 s1, v18
-; GFX9-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX9-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v19
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v13, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v14, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v19
+; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v15, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v16, vcc
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX9-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX9-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-LABEL: extractelement_sgpr_v4i128_vgpr_idx:
@@ -565,39 +564,39 @@ define amdgpu_ps i128 @extractelement_sgpr_v4i128_vgpr_idx(ptr addrspace(4) inre
 ; GFX8-NEXT:    v_cndmask_b32_e32 v17, v17, v11, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v18, v18, v12, vcc
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s14
-; GFX8-NEXT:    v_mov_b32_e32 v16, s15
 ; GFX8-NEXT:    v_cndmask_b32_e32 v17, v17, v13, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v18, v18, v14, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v17, v17, v15, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v18, v18, v16, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 1, v0
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX8-NEXT:    v_add_u32_e32 v19, vcc, 1, v0
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v19
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[0:1], 2, v19
+; GFX8-NEXT:    v_mov_b32_e32 v15, s14
+; GFX8-NEXT:    v_mov_b32_e32 v16, s15
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s[0:1]
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[0:1], 3, v19
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v17, v15, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v1, v7, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v18, v16, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, v8, s[0:1]
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v19
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v19
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v12, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v15, vcc
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v2, v16, vcc
-; GFX8-NEXT:    v_readfirstlane_b32 s0, v17
-; GFX8-NEXT:    v_readfirstlane_b32 s1, v18
-; GFX8-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX8-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v19
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v13, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v4, v2, v14, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v19
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v15, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v16, vcc
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX8-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX8-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX8-NEXT:    ; return to shader part epilog
 ;
 ; GFX7-LABEL: extractelement_sgpr_v4i128_vgpr_idx:
@@ -635,39 +634,39 @@ define amdgpu_ps i128 @extractelement_sgpr_v4i128_vgpr_idx(ptr addrspace(4) inre
 ; GFX7-NEXT:    v_cndmask_b32_e32 v17, v17, v11, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e32 v18, v18, v12, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GFX7-NEXT:    v_mov_b32_e32 v15, s14
-; GFX7-NEXT:    v_mov_b32_e32 v16, s15
 ; GFX7-NEXT:    v_cndmask_b32_e32 v17, v17, v13, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e32 v18, v18, v14, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v17, v17, v15, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v18, v18, v16, vcc
-; GFX7-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7-NEXT:    v_add_i32_e32 v19, vcc, 1, v0
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v19
 ; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[0:1], 2, v19
+; GFX7-NEXT:    v_mov_b32_e32 v15, s14
+; GFX7-NEXT:    v_mov_b32_e32 v16, s15
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s[0:1]
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s[0:1]
+; GFX7-NEXT:    v_cmp_eq_u32_e64 s[0:1], 3, v19
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v17, v15, vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v3, v1, v7, s[0:1]
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v18, v16, vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, v2, v8, s[0:1]
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v19
+; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v19
+; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e32 v2, v2, v12, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc
-; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v0, v1, v15, vcc
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v2, v16, vcc
-; GFX7-NEXT:    v_readfirstlane_b32 s0, v17
-; GFX7-NEXT:    v_readfirstlane_b32 s1, v18
-; GFX7-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX7-NEXT:    v_readfirstlane_b32 s3, v1
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v19
+; GFX7-NEXT:    v_cndmask_b32_e32 v3, v3, v13, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v4, v2, v14, vcc
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v19
+; GFX7-NEXT:    v_cndmask_b32_e32 v2, v3, v15, vcc
+; GFX7-NEXT:    v_cndmask_b32_e32 v3, v4, v16, vcc
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX7-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX7-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX7-NEXT:    ; return to shader part epilog
 ;
 ; GFX10-LABEL: extractelement_sgpr_v4i128_vgpr_idx:
@@ -714,16 +713,16 @@ define amdgpu_ps i128 @extractelement_sgpr_v4i128_vgpr_idx(ptr addrspace(4) inre
 ; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, s17, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s16, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v3, s17, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s17, s0
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 7, v1
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v4, s18, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v5, s19, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v4, s18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v5, s19, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s18, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s19, s0
-; GFX10-NEXT:    v_readfirstlane_b32 s0, v1
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s19, s0
+; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: extractelement_sgpr_v4i128_vgpr_idx:
@@ -775,18 +774,18 @@ define amdgpu_ps i128 @extractelement_sgpr_v4i128_vgpr_idx(ptr addrspace(4) inre
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s16, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v3, s17, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, s17, s0
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 7, v1
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v4, s18, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v5, s19, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v4, s18, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v5, s19, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s18, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s19, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, s19, s0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_readfirstlane_b32 s0, v1
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
-; GFX11-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX11-NEXT:    ; return to shader part epilog
   %vector = load <4 x i128>, ptr addrspace(4) %ptr
   %element = extractelement <4 x i128> %vector, i32 %idx

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
index 4bc1512893f18..db4e678c74a41 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
@@ -294,46 +294,46 @@ define i64 @dyn_extract_v8i64_const_s_v(i32 %sel) {
 ; GCN-LABEL: dyn_extract_v8i64_const_s_v:
 ; GCN:       ; %bb.0: ; %entry
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    s_mov_b64 s[16:17], 2
-; GCN-NEXT:    s_mov_b64 s[18:19], 1
-; GCN-NEXT:    s_mov_b64 s[14:15], 3
-; GCN-NEXT:    v_mov_b32_e32 v1, s18
-; GCN-NEXT:    v_mov_b32_e32 v2, s19
-; GCN-NEXT:    v_mov_b32_e32 v3, s16
-; GCN-NEXT:    v_mov_b32_e32 v4, s17
+; GCN-NEXT:    s_mov_b64 s[4:5], 1
+; GCN-NEXT:    s_mov_b64 s[6:7], 2
+; GCN-NEXT:    v_mov_b32_e32 v1, s4
+; GCN-NEXT:    v_mov_b32_e32 v2, s5
+; GCN-NEXT:    v_mov_b32_e32 v3, s6
+; GCN-NEXT:    v_mov_b32_e32 v4, s7
+; GCN-NEXT:    s_mov_b64 s[8:9], 3
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT:    s_mov_b64 s[12:13], 4
-; GCN-NEXT:    v_mov_b32_e32 v5, s14
-; GCN-NEXT:    v_mov_b32_e32 v6, s15
+; GCN-NEXT:    v_mov_b32_e32 v5, s8
+; GCN-NEXT:    v_mov_b32_e32 v6, s9
+; GCN-NEXT:    s_mov_b64 s[10:11], 4
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GCN-NEXT:    s_mov_b64 s[10:11], 5
-; GCN-NEXT:    v_mov_b32_e32 v7, s12
-; GCN-NEXT:    v_mov_b32_e32 v8, s13
+; GCN-NEXT:    s_mov_b64 s[12:13], 5
+; GCN-NEXT:    v_mov_b32_e32 v7, s10
+; GCN-NEXT:    v_mov_b32_e32 v8, s11
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GCN-NEXT:    s_mov_b64 s[8:9], 6
-; GCN-NEXT:    v_mov_b32_e32 v9, s10
-; GCN-NEXT:    v_mov_b32_e32 v10, s11
+; GCN-NEXT:    s_mov_b64 s[14:15], 6
+; GCN-NEXT:    v_mov_b32_e32 v9, s12
+; GCN-NEXT:    v_mov_b32_e32 v10, s13
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; GCN-NEXT:    s_mov_b64 s[6:7], 7
-; GCN-NEXT:    v_mov_b32_e32 v11, s8
-; GCN-NEXT:    v_mov_b32_e32 v12, s9
+; GCN-NEXT:    s_mov_b64 s[16:17], 7
+; GCN-NEXT:    v_mov_b32_e32 v11, s14
+; GCN-NEXT:    v_mov_b32_e32 v12, s15
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
-; GCN-NEXT:    s_mov_b64 s[4:5], 8
-; GCN-NEXT:    v_mov_b32_e32 v13, s6
-; GCN-NEXT:    v_mov_b32_e32 v14, s7
+; GCN-NEXT:    s_mov_b64 s[18:19], 8
+; GCN-NEXT:    v_mov_b32_e32 v13, s16
+; GCN-NEXT:    v_mov_b32_e32 v14, s17
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v12, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GCN-NEXT:    v_mov_b32_e32 v15, s4
-; GCN-NEXT:    v_mov_b32_e32 v16, s5
+; GCN-NEXT:    v_mov_b32_e32 v15, s18
+; GCN-NEXT:    v_mov_b32_e32 v16, s19
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
@@ -345,73 +345,73 @@ define i64 @dyn_extract_v8i64_const_s_v(i32 %sel) {
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    s_mov_b64 s[4:5], 2
+; GFX10-NEXT:    s_mov_b64 s[6:7], 2
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    v_mov_b32_e32 v1, s4
-; GFX10-NEXT:    v_mov_b32_e32 v2, s5
-; GFX10-NEXT:    s_mov_b64 s[6:7], 1
-; GFX10-NEXT:    s_mov_b64 s[4:5], 3
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, s6, v1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, s7, v2, vcc_lo
+; GFX10-NEXT:    v_mov_b32_e32 v1, s6
+; GFX10-NEXT:    v_mov_b32_e32 v2, s7
+; GFX10-NEXT:    s_mov_b64 s[4:5], 1
+; GFX10-NEXT:    s_mov_b64 s[8:9], 3
+; GFX10-NEXT:    s_mov_b64 s[10:11], 4
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, s4, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, s5, v2, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX10-NEXT:    s_mov_b64 s[6:7], 4
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX10-NEXT:    s_mov_b64 s[12:13], 5
+; GFX10-NEXT:    s_mov_b64 s[14:15], 6
+; GFX10-NEXT:    s_mov_b64 s[16:17], 7
+; GFX10-NEXT:    s_mov_b64 s[18:19], 8
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX10-NEXT:    s_mov_b64 s[4:5], 5
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX10-NEXT:    s_mov_b64 s[6:7], 6
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX10-NEXT:    s_mov_b64 s[4:5], 7
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX10-NEXT:    s_mov_b64 s[6:7], 8
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s16, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s17, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s6, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s7, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s19, vcc_lo
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: dyn_extract_v8i64_const_s_v:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT:    s_mov_b64 s[0:1], 2
+; GFX11-NEXT:    s_mov_b64 s[2:3], 2
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
-; GFX11-NEXT:    s_mov_b64 s[2:3], 1
-; GFX11-NEXT:    s_mov_b64 s[0:1], 3
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
+; GFX11-NEXT:    v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
+; GFX11-NEXT:    s_mov_b64 s[0:1], 1
+; GFX11-NEXT:    s_mov_b64 s[4:5], 3
+; GFX11-NEXT:    s_mov_b64 s[6:7], 4
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX11-NEXT:    s_mov_b64 s[2:3], 4
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s1, vcc_lo
+; GFX11-NEXT:    s_mov_b64 s[8:9], 5
+; GFX11-NEXT:    s_mov_b64 s[10:11], 6
+; GFX11-NEXT:    s_mov_b64 s[12:13], 7
+; GFX11-NEXT:    s_mov_b64 s[14:15], 8
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX11-NEXT:    s_mov_b64 s[0:1], 5
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX11-NEXT:    s_mov_b64 s[2:3], 6
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX11-NEXT:    s_mov_b64 s[0:1], 7
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX11-NEXT:    s_mov_b64 s[2:3], 8
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %ext = extractelement <8 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8>, i32 %sel
@@ -495,28 +495,40 @@ entry:
 define amdgpu_ps void @dyn_extract_v8i64_s_v(<8 x i64> inreg %vec, i32 %sel) {
 ; GPRIDX-LABEL: dyn_extract_v8i64_s_v:
 ; GPRIDX:       ; %bb.0: ; %entry
-; GPRIDX-NEXT:    v_mov_b32_e32 v1, s2
-; GPRIDX-NEXT:    v_mov_b32_e32 v2, s3
-; GPRIDX-NEXT:    v_mov_b32_e32 v3, s4
-; GPRIDX-NEXT:    v_mov_b32_e32 v4, s5
+; GPRIDX-NEXT:    s_mov_b32 s0, s2
+; GPRIDX-NEXT:    s_mov_b32 s1, s3
+; GPRIDX-NEXT:    s_mov_b32 s2, s4
+; GPRIDX-NEXT:    s_mov_b32 s3, s5
+; GPRIDX-NEXT:    s_mov_b32 s4, s6
+; GPRIDX-NEXT:    s_mov_b32 s5, s7
+; GPRIDX-NEXT:    v_mov_b32_e32 v1, s0
+; GPRIDX-NEXT:    v_mov_b32_e32 v2, s1
+; GPRIDX-NEXT:    v_mov_b32_e32 v3, s2
+; GPRIDX-NEXT:    v_mov_b32_e32 v4, s3
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GPRIDX-NEXT:    v_mov_b32_e32 v5, s6
-; GPRIDX-NEXT:    v_mov_b32_e32 v6, s7
+; GPRIDX-NEXT:    s_mov_b32 s6, s8
+; GPRIDX-NEXT:    s_mov_b32 s7, s9
+; GPRIDX-NEXT:    v_mov_b32_e32 v5, s4
+; GPRIDX-NEXT:    v_mov_b32_e32 v6, s5
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GPRIDX-NEXT:    v_mov_b32_e32 v7, s8
-; GPRIDX-NEXT:    v_mov_b32_e32 v8, s9
+; GPRIDX-NEXT:    s_mov_b32 s8, s10
+; GPRIDX-NEXT:    s_mov_b32 s9, s11
+; GPRIDX-NEXT:    v_mov_b32_e32 v7, s6
+; GPRIDX-NEXT:    v_mov_b32_e32 v8, s7
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GPRIDX-NEXT:    v_mov_b32_e32 v9, s10
-; GPRIDX-NEXT:    v_mov_b32_e32 v10, s11
+; GPRIDX-NEXT:    s_mov_b32 s10, s12
+; GPRIDX-NEXT:    s_mov_b32 s11, s13
+; GPRIDX-NEXT:    v_mov_b32_e32 v9, s8
+; GPRIDX-NEXT:    v_mov_b32_e32 v10, s9
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; GPRIDX-NEXT:    v_mov_b32_e32 v11, s12
-; GPRIDX-NEXT:    v_mov_b32_e32 v12, s13
+; GPRIDX-NEXT:    v_mov_b32_e32 v11, s10
+; GPRIDX-NEXT:    v_mov_b32_e32 v12, s11
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
@@ -537,28 +549,40 @@ define amdgpu_ps void @dyn_extract_v8i64_s_v(<8 x i64> inreg %vec, i32 %sel) {
 ;
 ; MOVREL-LABEL: dyn_extract_v8i64_s_v:
 ; MOVREL:       ; %bb.0: ; %entry
-; MOVREL-NEXT:    v_mov_b32_e32 v1, s2
-; MOVREL-NEXT:    v_mov_b32_e32 v2, s3
-; MOVREL-NEXT:    v_mov_b32_e32 v3, s4
-; MOVREL-NEXT:    v_mov_b32_e32 v4, s5
+; MOVREL-NEXT:    s_mov_b32 s0, s2
+; MOVREL-NEXT:    s_mov_b32 s1, s3
+; MOVREL-NEXT:    s_mov_b32 s2, s4
+; MOVREL-NEXT:    s_mov_b32 s3, s5
+; MOVREL-NEXT:    s_mov_b32 s4, s6
+; MOVREL-NEXT:    s_mov_b32 s5, s7
+; MOVREL-NEXT:    v_mov_b32_e32 v1, s0
+; MOVREL-NEXT:    v_mov_b32_e32 v2, s1
+; MOVREL-NEXT:    v_mov_b32_e32 v3, s2
+; MOVREL-NEXT:    v_mov_b32_e32 v4, s3
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; MOVREL-NEXT:    v_mov_b32_e32 v5, s6
-; MOVREL-NEXT:    v_mov_b32_e32 v6, s7
+; MOVREL-NEXT:    s_mov_b32 s6, s8
+; MOVREL-NEXT:    s_mov_b32 s7, s9
+; MOVREL-NEXT:    v_mov_b32_e32 v5, s4
+; MOVREL-NEXT:    v_mov_b32_e32 v6, s5
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; MOVREL-NEXT:    v_mov_b32_e32 v7, s8
-; MOVREL-NEXT:    v_mov_b32_e32 v8, s9
+; MOVREL-NEXT:    s_mov_b32 s8, s10
+; MOVREL-NEXT:    s_mov_b32 s9, s11
+; MOVREL-NEXT:    v_mov_b32_e32 v7, s6
+; MOVREL-NEXT:    v_mov_b32_e32 v8, s7
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; MOVREL-NEXT:    v_mov_b32_e32 v9, s10
-; MOVREL-NEXT:    v_mov_b32_e32 v10, s11
+; MOVREL-NEXT:    s_mov_b32 s10, s12
+; MOVREL-NEXT:    s_mov_b32 s11, s13
+; MOVREL-NEXT:    v_mov_b32_e32 v9, s8
+; MOVREL-NEXT:    v_mov_b32_e32 v10, s9
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; MOVREL-NEXT:    v_mov_b32_e32 v11, s12
-; MOVREL-NEXT:    v_mov_b32_e32 v12, s13
+; MOVREL-NEXT:    v_mov_b32_e32 v11, s10
+; MOVREL-NEXT:    v_mov_b32_e32 v12, s11
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
@@ -579,56 +603,88 @@ define amdgpu_ps void @dyn_extract_v8i64_s_v(<8 x i64> inreg %vec, i32 %sel) {
 ;
 ; GFX10-LABEL: dyn_extract_v8i64_s_v:
 ; GFX10:       ; %bb.0: ; %entry
-; GFX10-NEXT:    v_mov_b32_e32 v1, s4
-; GFX10-NEXT:    v_mov_b32_e32 v2, s5
+; GFX10-NEXT:    s_mov_b32 s0, s2
+; GFX10-NEXT:    s_mov_b32 s2, s4
+; GFX10-NEXT:    s_mov_b32 s19, s5
+; GFX10-NEXT:    v_mov_b32_e32 v1, s2
+; GFX10-NEXT:    v_mov_b32_e32 v2, s19
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
+; GFX10-NEXT:    s_mov_b32 s1, s3
+; GFX10-NEXT:    s_mov_b32 s4, s6
+; GFX10-NEXT:    s_mov_b32 s5, s7
+; GFX10-NEXT:    s_mov_b32 s6, s8
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
+; GFX10-NEXT:    s_mov_b32 s7, s9
+; GFX10-NEXT:    s_mov_b32 s8, s10
+; GFX10-NEXT:    s_mov_b32 s9, s11
+; GFX10-NEXT:    s_mov_b32 s10, s12
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX10-NEXT:    s_mov_b32 s11, s13
+; GFX10-NEXT:    s_mov_b32 s12, s14
+; GFX10-NEXT:    s_mov_b32 s13, s15
+; GFX10-NEXT:    s_mov_b32 s14, s16
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX10-NEXT:    s_mov_b32 s15, s17
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s17, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
 ; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: dyn_extract_v8i64_s_v:
 ; GFX11:       ; %bb.0: ; %entry
-; GFX11-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5
+; GFX11-NEXT:    s_mov_b32 s0, s2
+; GFX11-NEXT:    s_mov_b32 s2, s4
+; GFX11-NEXT:    s_mov_b32 s19, s5
+; GFX11-NEXT:    v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s19
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
+; GFX11-NEXT:    s_mov_b32 s1, s3
+; GFX11-NEXT:    s_mov_b32 s4, s6
+; GFX11-NEXT:    s_mov_b32 s5, s7
+; GFX11-NEXT:    s_mov_b32 s6, s8
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
+; GFX11-NEXT:    s_mov_b32 s7, s9
+; GFX11-NEXT:    s_mov_b32 s8, s10
+; GFX11-NEXT:    s_mov_b32 s9, s11
+; GFX11-NEXT:    s_mov_b32 s10, s12
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX11-NEXT:    s_mov_b32 s11, s13
+; GFX11-NEXT:    s_mov_b32 s12, s14
+; GFX11-NEXT:    s_mov_b32 s13, s15
+; GFX11-NEXT:    s_mov_b32 s14, s16
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX11-NEXT:    s_mov_b32 s15, s17
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s14, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s16, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s17, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
 ; GFX11-NEXT:    global_store_b64 v[0:1], v[0:1], off
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX11-NEXT:    s_endpgm
@@ -2420,18 +2476,26 @@ entry:
 define amdgpu_ps double @dyn_extract_v6f64_s_v(<6 x double> inreg %vec, i32 %sel) {
 ; GCN-LABEL: dyn_extract_v6f64_s_v:
 ; GCN:       ; %bb.0: ; %entry
-; GCN-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-NEXT:    v_mov_b32_e32 v2, s3
-; GCN-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-NEXT:    v_mov_b32_e32 v4, s5
+; GCN-NEXT:    s_mov_b32 s0, s2
+; GCN-NEXT:    s_mov_b32 s1, s3
+; GCN-NEXT:    s_mov_b32 s2, s4
+; GCN-NEXT:    s_mov_b32 s3, s5
+; GCN-NEXT:    s_mov_b32 s4, s6
+; GCN-NEXT:    s_mov_b32 s5, s7
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-NEXT:    v_mov_b32_e32 v3, s2
+; GCN-NEXT:    v_mov_b32_e32 v4, s3
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s6
-; GCN-NEXT:    v_mov_b32_e32 v6, s7
+; GCN-NEXT:    s_mov_b32 s6, s8
+; GCN-NEXT:    s_mov_b32 s7, s9
+; GCN-NEXT:    v_mov_b32_e32 v5, s4
+; GCN-NEXT:    v_mov_b32_e32 v6, s5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GCN-NEXT:    v_mov_b32_e32 v7, s8
-; GCN-NEXT:    v_mov_b32_e32 v8, s9
+; GCN-NEXT:    v_mov_b32_e32 v7, s6
+; GCN-NEXT:    v_mov_b32_e32 v8, s7
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
@@ -2453,45 +2517,69 @@ define amdgpu_ps double @dyn_extract_v6f64_s_v(<6 x double> inreg %vec, i32 %sel
 ;
 ; GFX10-LABEL: dyn_extract_v6f64_s_v:
 ; GFX10:       ; %bb.0: ; %entry
-; GFX10-NEXT:    v_mov_b32_e32 v1, s4
-; GFX10-NEXT:    v_mov_b32_e32 v2, s5
+; GFX10-NEXT:    s_mov_b32 s0, s2
+; GFX10-NEXT:    s_mov_b32 s2, s4
+; GFX10-NEXT:    s_mov_b32 s15, s5
+; GFX10-NEXT:    v_mov_b32_e32 v1, s2
+; GFX10-NEXT:    v_mov_b32_e32 v2, s15
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
+; GFX10-NEXT:    s_mov_b32 s1, s3
+; GFX10-NEXT:    s_mov_b32 s4, s6
+; GFX10-NEXT:    s_mov_b32 s5, s7
+; GFX10-NEXT:    s_mov_b32 s6, s8
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
+; GFX10-NEXT:    s_mov_b32 s7, s9
+; GFX10-NEXT:    s_mov_b32 s8, s10
+; GFX10-NEXT:    s_mov_b32 s9, s11
+; GFX10-NEXT:    s_mov_b32 s10, s12
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX10-NEXT:    s_mov_b32 s11, s13
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s12, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s13, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s11, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_extract_v6f64_s_v:
 ; GFX11:       ; %bb.0: ; %entry
-; GFX11-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5
+; GFX11-NEXT:    s_mov_b32 s0, s2
+; GFX11-NEXT:    s_mov_b32 s2, s4
+; GFX11-NEXT:    s_mov_b32 s15, s5
+; GFX11-NEXT:    v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s15
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
+; GFX11-NEXT:    s_mov_b32 s1, s3
+; GFX11-NEXT:    s_mov_b32 s4, s6
+; GFX11-NEXT:    s_mov_b32 s5, s7
+; GFX11-NEXT:    s_mov_b32 s6, s8
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
+; GFX11-NEXT:    s_mov_b32 s7, s9
+; GFX11-NEXT:    s_mov_b32 s8, s10
+; GFX11-NEXT:    s_mov_b32 s9, s11
+; GFX11-NEXT:    s_mov_b32 s10, s12
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX11-NEXT:    s_mov_b32 s11, s13
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s12, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s13, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s10, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s11, vcc_lo
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    ; return to shader part epilog
@@ -2640,23 +2728,33 @@ entry:
 define amdgpu_ps double @dyn_extract_v7f64_s_v(<7 x double> inreg %vec, i32 %sel) {
 ; GCN-LABEL: dyn_extract_v7f64_s_v:
 ; GCN:       ; %bb.0: ; %entry
-; GCN-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-NEXT:    v_mov_b32_e32 v2, s3
-; GCN-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-NEXT:    v_mov_b32_e32 v4, s5
+; GCN-NEXT:    s_mov_b32 s0, s2
+; GCN-NEXT:    s_mov_b32 s1, s3
+; GCN-NEXT:    s_mov_b32 s2, s4
+; GCN-NEXT:    s_mov_b32 s3, s5
+; GCN-NEXT:    s_mov_b32 s4, s6
+; GCN-NEXT:    s_mov_b32 s5, s7
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-NEXT:    v_mov_b32_e32 v3, s2
+; GCN-NEXT:    v_mov_b32_e32 v4, s3
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s6
-; GCN-NEXT:    v_mov_b32_e32 v6, s7
+; GCN-NEXT:    s_mov_b32 s6, s8
+; GCN-NEXT:    s_mov_b32 s7, s9
+; GCN-NEXT:    v_mov_b32_e32 v5, s4
+; GCN-NEXT:    v_mov_b32_e32 v6, s5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GCN-NEXT:    v_mov_b32_e32 v7, s8
-; GCN-NEXT:    v_mov_b32_e32 v8, s9
+; GCN-NEXT:    s_mov_b32 s8, s10
+; GCN-NEXT:    s_mov_b32 s9, s11
+; GCN-NEXT:    v_mov_b32_e32 v7, s6
+; GCN-NEXT:    v_mov_b32_e32 v8, s7
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GCN-NEXT:    v_mov_b32_e32 v9, s10
-; GCN-NEXT:    v_mov_b32_e32 v10, s11
+; GCN-NEXT:    v_mov_b32_e32 v9, s8
+; GCN-NEXT:    v_mov_b32_e32 v10, s9
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
@@ -2673,8 +2771,8 @@ define amdgpu_ps double @dyn_extract_v7f64_s_v(<7 x double> inreg %vec, i32 %sel
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GCN-NEXT:    ; kill: def $vgpr15 killed $sgpr2 killed $exec
-; GCN-NEXT:    ; kill: def $vgpr16 killed $sgpr3 killed $exec
+; GCN-NEXT:    ; kill: def $vgpr15 killed $sgpr14 killed $exec
+; GCN-NEXT:    ; kill: def $vgpr16 killed $sgpr15 killed $exec
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v15, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v16, vcc
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v0
@@ -2683,59 +2781,85 @@ define amdgpu_ps double @dyn_extract_v7f64_s_v(<7 x double> inreg %vec, i32 %sel
 ;
 ; GFX10-LABEL: dyn_extract_v7f64_s_v:
 ; GFX10:       ; %bb.0: ; %entry
-; GFX10-NEXT:    v_mov_b32_e32 v1, s4
-; GFX10-NEXT:    v_mov_b32_e32 v2, s5
+; GFX10-NEXT:    s_mov_b32 s0, s2
+; GFX10-NEXT:    s_mov_b32 s2, s4
+; GFX10-NEXT:    s_mov_b32 s19, s5
+; GFX10-NEXT:    v_mov_b32_e32 v1, s2
+; GFX10-NEXT:    v_mov_b32_e32 v2, s19
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    s_mov_b32 s0, s14
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
+; GFX10-NEXT:    s_mov_b32 s1, s3
+; GFX10-NEXT:    s_mov_b32 s4, s6
+; GFX10-NEXT:    s_mov_b32 s5, s7
+; GFX10-NEXT:    s_mov_b32 s6, s8
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
+; GFX10-NEXT:    s_mov_b32 s7, s9
+; GFX10-NEXT:    s_mov_b32 s8, s10
+; GFX10-NEXT:    s_mov_b32 s9, s11
+; GFX10-NEXT:    s_mov_b32 s10, s12
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX10-NEXT:    s_mov_b32 s11, s13
+; GFX10-NEXT:    s_mov_b32 s12, s14
+; GFX10-NEXT:    s_mov_b32 s13, s15
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_extract_v7f64_s_v:
 ; GFX11:       ; %bb.0: ; %entry
-; GFX11-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5
+; GFX11-NEXT:    s_mov_b32 s0, s2
+; GFX11-NEXT:    s_mov_b32 s2, s4
+; GFX11-NEXT:    s_mov_b32 s19, s5
+; GFX11-NEXT:    v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s19
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    s_mov_b32 s0, s14
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
+; GFX11-NEXT:    s_mov_b32 s1, s3
+; GFX11-NEXT:    s_mov_b32 s4, s6
+; GFX11-NEXT:    s_mov_b32 s5, s7
+; GFX11-NEXT:    s_mov_b32 s6, s8
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
+; GFX11-NEXT:    s_mov_b32 s7, s9
+; GFX11-NEXT:    s_mov_b32 s8, s10
+; GFX11-NEXT:    s_mov_b32 s9, s11
+; GFX11-NEXT:    s_mov_b32 s10, s12
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX11-NEXT:    s_mov_b32 s11, s13
+; GFX11-NEXT:    s_mov_b32 s12, s14
+; GFX11-NEXT:    s_mov_b32 s13, s15
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    ; return to shader part epilog
@@ -4630,9 +4754,9 @@ define i32 @v_extract_v64i32_7(ptr addrspace(1) %ptr) {
 ; GPRIDX-LABEL: v_extract_v64i32_7:
 ; GPRIDX:       ; %bb.0:
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GPRIDX-NEXT:    global_load_dwordx4 v[0:3], v[0:1], off offset:16
+; GPRIDX-NEXT:    global_load_dwordx4 v[4:7], v[0:1], off offset:16
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0)
-; GPRIDX-NEXT:    v_mov_b32_e32 v0, v3
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v7
 ; GPRIDX-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; MOVREL-LABEL: v_extract_v64i32_7:
@@ -4640,27 +4764,27 @@ define i32 @v_extract_v64i32_7(ptr addrspace(1) %ptr) {
 ; MOVREL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; MOVREL-NEXT:    v_add_u32_e32 v0, vcc, 16, v0
 ; MOVREL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; MOVREL-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; MOVREL-NEXT:    flat_load_dwordx4 v[4:7], v[0:1]
 ; MOVREL-NEXT:    s_waitcnt vmcnt(0)
-; MOVREL-NEXT:    v_mov_b32_e32 v0, v3
+; MOVREL-NEXT:    v_mov_b32_e32 v0, v7
 ; MOVREL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_extract_v64i32_7:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    global_load_dwordx4 v[0:3], v[0:1], off offset:16
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v[0:1], off offset:16
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_mov_b32_e32 v0, v3
+; GFX10-NEXT:    v_mov_b32_e32 v0, v7
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_extract_v64i32_7:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT:    global_load_b128 v[0:3], v[0:1], off offset:16
+; GFX11-NEXT:    global_load_b128 v[4:7], v[0:1], off offset:16
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_mov_b32_e32 v0, v3
+; GFX11-NEXT:    v_mov_b32_e32 v0, v7
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %vec = load <64 x i32>, ptr addrspace(1) %ptr
   %elt = extractelement <64 x i32> %vec, i32 7
@@ -4755,9 +4879,9 @@ define i32 @v_extract_v64i32_37(ptr addrspace(1) %ptr) {
 ; GPRIDX-LABEL: v_extract_v64i32_37:
 ; GPRIDX:       ; %bb.0:
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GPRIDX-NEXT:    global_load_dwordx4 v[0:3], v[0:1], off offset:144
+; GPRIDX-NEXT:    global_load_dwordx4 v[4:7], v[0:1], off offset:144
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0)
-; GPRIDX-NEXT:    v_mov_b32_e32 v0, v1
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v5
 ; GPRIDX-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; MOVREL-LABEL: v_extract_v64i32_37:
@@ -4765,27 +4889,27 @@ define i32 @v_extract_v64i32_37(ptr addrspace(1) %ptr) {
 ; MOVREL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; MOVREL-NEXT:    v_add_u32_e32 v0, vcc, 0x90, v0
 ; MOVREL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; MOVREL-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; MOVREL-NEXT:    flat_load_dwordx4 v[4:7], v[0:1]
 ; MOVREL-NEXT:    s_waitcnt vmcnt(0)
-; MOVREL-NEXT:    v_mov_b32_e32 v0, v1
+; MOVREL-NEXT:    v_mov_b32_e32 v0, v5
 ; MOVREL-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_extract_v64i32_37:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    global_load_dwordx4 v[0:3], v[0:1], off offset:144
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v[0:1], off offset:144
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_mov_b32_e32 v0, v1
+; GFX10-NEXT:    v_mov_b32_e32 v0, v5
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_extract_v64i32_37:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT:    global_load_b128 v[0:3], v[0:1], off offset:144
+; GFX11-NEXT:    global_load_b128 v[4:7], v[0:1], off offset:144
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_mov_b32_e32 v0, v1
+; GFX11-NEXT:    v_mov_b32_e32 v0, v5
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %vec = load <64 x i32>, ptr addrspace(1) %ptr
   %elt = extractelement <64 x i32> %vec, i32 37

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
index 9eb2f0671522f..2f5f09e4d74a1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
@@ -3238,10 +3238,6 @@ define amdgpu_ps void @insertelement_v_v16i16_s_s(ptr addrspace(1) %ptr, i16 inr
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[6:7], s12, 5
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], s12, 6
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[10:11], s12, 7
-; GFX9-NEXT:    v_mov_b32_e32 v10, 0
-; GFX9-NEXT:    v_mov_b32_e32 v11, 0
-; GFX9-NEXT:    v_mov_b32_e32 v12, 16
-; GFX9-NEXT:    v_mov_b32_e32 v13, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v4, s[0:1]
@@ -3251,18 +3247,22 @@ define amdgpu_ps void @insertelement_v_v16i16_s_s(ptr addrspace(1) %ptr, i16 inr
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v7, s[6:7]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v8, s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v9, s[10:11]
-; GFX9-NEXT:    v_and_or_b32 v14, v1, s13, v0
+; GFX9-NEXT:    v_and_or_b32 v10, v1, s13, v0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[12:13], s12, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, v14, s[12:13]
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v14, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v4, v14, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v14, s[2:3]
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v6, v14, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v7, v14, s[6:7]
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v8, v14, s[8:9]
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, v9, v14, s[10:11]
-; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[0:3], off
-; GFX9-NEXT:    global_store_dwordx4 v[12:13], v[4:7], off
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, v10, s[12:13]
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v10, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v4, v10, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v10, s[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, v6, v10, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v5, v7, v10, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v8, v10, s[8:9]
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, v9, v10, s[10:11]
+; GFX9-NEXT:    v_mov_b32_e32 v8, 0
+; GFX9-NEXT:    v_mov_b32_e32 v9, 0
+; GFX9-NEXT:    v_mov_b32_e32 v10, 16
+; GFX9-NEXT:    v_mov_b32_e32 v11, 0
+; GFX9-NEXT:    global_store_dwordx4 v[8:9], v[0:3], off
+; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[4:7], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: insertelement_v_v16i16_s_s:
@@ -3376,52 +3376,52 @@ define amdgpu_ps void @insertelement_s_v16i16_v_s(ptr addrspace(4) inreg %ptr, i
 ; GFX9-LABEL: insertelement_s_v16i16_v_s:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[2:3], 0x0
-; GFX9-NEXT:    s_lshr_b32 s0, s4, 1
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 1
+; GFX9-NEXT:    s_lshr_b32 s2, s4, 1
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 1
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_cselect_b32 s1, s9, s8
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 2
-; GFX9-NEXT:    s_cselect_b32 s1, s10, s1
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 3
-; GFX9-NEXT:    s_cselect_b32 s1, s11, s1
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 4
-; GFX9-NEXT:    s_cselect_b32 s1, s12, s1
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 5
-; GFX9-NEXT:    s_cselect_b32 s1, s13, s1
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 6
-; GFX9-NEXT:    s_cselect_b32 s1, s14, s1
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 7
-; GFX9-NEXT:    s_cselect_b32 s1, s15, s1
-; GFX9-NEXT:    s_and_b32 s2, s4, 1
-; GFX9-NEXT:    s_lshl_b32 s2, s2, 4
-; GFX9-NEXT:    s_lshl_b32 s3, 0xffff, s2
-; GFX9-NEXT:    s_andn2_b32 s1, s1, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    v_lshl_or_b32 v8, v0, s2, v1
+; GFX9-NEXT:    s_cselect_b32 s0, s9, s8
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 2
+; GFX9-NEXT:    s_cselect_b32 s0, s10, s0
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 3
+; GFX9-NEXT:    s_cselect_b32 s0, s11, s0
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 4
+; GFX9-NEXT:    s_cselect_b32 s0, s12, s0
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 5
+; GFX9-NEXT:    s_cselect_b32 s0, s13, s0
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 6
+; GFX9-NEXT:    s_cselect_b32 s0, s14, s0
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 7
+; GFX9-NEXT:    s_cselect_b32 s0, s15, s0
+; GFX9-NEXT:    s_and_b32 s1, s4, 1
+; GFX9-NEXT:    s_lshl_b32 s1, s1, 4
+; GFX9-NEXT:    s_lshl_b32 s3, 0xffff, s1
+; GFX9-NEXT:    s_andn2_b32 s0, s0, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    v_lshl_or_b32 v8, v0, s1, v1
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s9
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 1
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 1
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s10
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 2
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 2
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s11
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 3
-; GFX9-NEXT:    v_mov_b32_e32 v4, s12
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 4
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 3
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s13
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 5
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v8, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 5
 ; GFX9-NEXT:    v_mov_b32_e32 v6, s14
 ; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 6
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 6
+; GFX9-NEXT:    v_mov_b32_e32 v4, s12
 ; GFX9-NEXT:    v_mov_b32_e32 v7, s15
+; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], s2, 4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v6, v6, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 7
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 7
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, v4, v8, s[0:1]
 ; GFX9-NEXT:    v_cndmask_b32_e32 v7, v7, v8, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v8, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v9, 0
@@ -4161,31 +4161,31 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v0
 ; GFX9-NEXT:    v_not_b32_e32 v1, v1
-; GFX9-NEXT:    v_mov_b32_e32 v11, 0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v0
-; GFX9-NEXT:    v_mov_b32_e32 v12, 0
-; GFX9-NEXT:    v_mov_b32_e32 v13, 16
-; GFX9-NEXT:    v_mov_b32_e32 v14, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
-; GFX9-NEXT:    v_cndmask_b32_e32 v15, v3, v4, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v15, v15, v5, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v15, v15, v6, s[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e32 v11, v3, v4, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v11, v11, v5, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v11, v11, v6, s[2:3]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_cndmask_b32_e64 v15, v15, v7, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e64 v15, v15, v8, s[6:7]
-; GFX9-NEXT:    v_cndmask_b32_e64 v15, v15, v9, s[8:9]
-; GFX9-NEXT:    v_cndmask_b32_e64 v15, v15, v10, s[10:11]
-; GFX9-NEXT:    v_and_or_b32 v15, v15, v1, v2
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v3, v15, s[12:13]
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v15, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v5, v15, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v6, v15, s[2:3]
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v7, v15, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v8, v15, s[6:7]
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v9, v15, s[8:9]
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, v10, v15, s[10:11]
-; GFX9-NEXT:    global_store_dwordx4 v[11:12], v[0:3], off
-; GFX9-NEXT:    global_store_dwordx4 v[13:14], v[4:7], off
+; GFX9-NEXT:    v_cndmask_b32_e64 v11, v11, v7, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v11, v11, v8, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v11, v11, v9, s[8:9]
+; GFX9-NEXT:    v_cndmask_b32_e64 v11, v11, v10, s[10:11]
+; GFX9-NEXT:    v_and_or_b32 v11, v11, v1, v2
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v3, v11, s[12:13]
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v5, v11, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v6, v11, s[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v5, v8, v11, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v9, v11, s[8:9]
+; GFX9-NEXT:    v_mov_b32_e32 v8, 0
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v11, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, v7, v11, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, v10, v11, s[10:11]
+; GFX9-NEXT:    v_mov_b32_e32 v9, 0
+; GFX9-NEXT:    v_mov_b32_e32 v10, 16
+; GFX9-NEXT:    v_mov_b32_e32 v11, 0
+; GFX9-NEXT:    global_store_dwordx4 v[8:9], v[0:3], off
+; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[4:7], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: insertelement_v_v16i16_s_v:
@@ -4209,32 +4209,32 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v0
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v0
 ; GFX8-NEXT:    v_not_b32_e32 v1, v1
-; GFX8-NEXT:    v_mov_b32_e32 v11, 0
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v0
-; GFX8-NEXT:    v_mov_b32_e32 v12, 0
-; GFX8-NEXT:    v_mov_b32_e32 v13, 16
-; GFX8-NEXT:    v_mov_b32_e32 v14, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
-; GFX8-NEXT:    v_cndmask_b32_e32 v15, v3, v4, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v15, v15, v5, s[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v15, v15, v6, s[2:3]
+; GFX8-NEXT:    v_cndmask_b32_e32 v11, v3, v4, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v11, v5, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v11, v6, s[2:3]
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_cndmask_b32_e64 v15, v15, v7, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v15, v15, v8, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v15, v15, v9, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v15, v15, v10, s[10:11]
-; GFX8-NEXT:    v_and_b32_e32 v1, v15, v1
-; GFX8-NEXT:    v_or_b32_e32 v15, v1, v2
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v3, v15, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v4, v15, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v5, v15, s[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, v6, v15, s[2:3]
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v7, v15, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v8, v15, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, v9, v15, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v7, v10, v15, s[10:11]
-; GFX8-NEXT:    flat_store_dwordx4 v[11:12], v[0:3]
-; GFX8-NEXT:    flat_store_dwordx4 v[13:14], v[4:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v11, v7, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v11, v8, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v11, v9, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v11, v11, v10, s[10:11]
+; GFX8-NEXT:    v_and_b32_e32 v1, v11, v1
+; GFX8-NEXT:    v_or_b32_e32 v11, v1, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v3, v11, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v5, v11, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v6, v11, s[2:3]
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v8, v11, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, v9, v11, s[8:9]
+; GFX8-NEXT:    v_mov_b32_e32 v8, 0
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v4, v11, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v7, v11, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, v10, v11, s[10:11]
+; GFX8-NEXT:    v_mov_b32_e32 v9, 0
+; GFX8-NEXT:    v_mov_b32_e32 v10, 16
+; GFX8-NEXT:    v_mov_b32_e32 v11, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
+; GFX8-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
 ; GFX8-NEXT:    s_endpgm
 ;
 ; GFX7-LABEL: insertelement_v_v16i16_s_v:
@@ -4405,10 +4405,6 @@ define amdgpu_ps void @insertelement_v_v16i16_v_s(ptr addrspace(1) %ptr, i16 %va
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[6:7], s12, 5
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], s12, 6
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[10:11], s12, 7
-; GFX9-NEXT:    v_mov_b32_e32 v11, 0
-; GFX9-NEXT:    v_mov_b32_e32 v12, 0
-; GFX9-NEXT:    v_mov_b32_e32 v13, 16
-; GFX9-NEXT:    v_mov_b32_e32 v14, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s[0:1]
@@ -4418,18 +4414,22 @@ define amdgpu_ps void @insertelement_v_v16i16_v_s(ptr addrspace(1) %ptr, i16 %va
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v8, s[6:7]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v9, s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v10, s[10:11]
-; GFX9-NEXT:    v_and_or_b32 v15, v1, s13, v0
+; GFX9-NEXT:    v_and_or_b32 v11, v1, s13, v0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[12:13], s12, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v3, v15, s[12:13]
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v15, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v5, v15, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v6, v15, s[2:3]
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v7, v15, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v8, v15, s[6:7]
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v9, v15, s[8:9]
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, v10, v15, s[10:11]
-; GFX9-NEXT:    global_store_dwordx4 v[11:12], v[0:3], off
-; GFX9-NEXT:    global_store_dwordx4 v[13:14], v[4:7], off
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v3, v11, s[12:13]
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v5, v11, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v6, v11, s[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v5, v8, v11, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v9, v11, s[8:9]
+; GFX9-NEXT:    v_mov_b32_e32 v8, 0
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v11, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, v7, v11, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, v10, v11, s[10:11]
+; GFX9-NEXT:    v_mov_b32_e32 v9, 0
+; GFX9-NEXT:    v_mov_b32_e32 v10, 16
+; GFX9-NEXT:    v_mov_b32_e32 v11, 0
+; GFX9-NEXT:    global_store_dwordx4 v[8:9], v[0:3], off
+; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[4:7], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: insertelement_v_v16i16_v_s:
@@ -4557,11 +4557,7 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v0
 ; GFX9-NEXT:    v_not_b32_e32 v1, v1
-; GFX9-NEXT:    v_mov_b32_e32 v12, 0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v0
-; GFX9-NEXT:    v_mov_b32_e32 v13, 0
-; GFX9-NEXT:    v_mov_b32_e32 v14, 16
-; GFX9-NEXT:    v_mov_b32_e32 v15, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v6, s[0:1]
@@ -4571,17 +4567,21 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v9, s[6:7]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v10, s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[10:11]
-; GFX9-NEXT:    v_and_or_b32 v16, v3, v1, v2
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v4, v16, s[12:13]
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v5, v16, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v6, v16, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v7, v16, s[2:3]
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v8, v16, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v9, v16, s[6:7]
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v10, v16, s[8:9]
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, v11, v16, s[10:11]
-; GFX9-NEXT:    global_store_dwordx4 v[12:13], v[0:3], off
-; GFX9-NEXT:    global_store_dwordx4 v[14:15], v[4:7], off
+; GFX9-NEXT:    v_and_or_b32 v12, v3, v1, v2
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v4, v12, s[12:13]
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v5, v12, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, v8, v12, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v5, v9, v12, s[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v8, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v6, v12, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v7, v12, s[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v10, v12, s[8:9]
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, v11, v12, s[10:11]
+; GFX9-NEXT:    v_mov_b32_e32 v9, 0
+; GFX9-NEXT:    v_mov_b32_e32 v10, 16
+; GFX9-NEXT:    v_mov_b32_e32 v11, 0
+; GFX9-NEXT:    global_store_dwordx4 v[8:9], v[0:3], off
+; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[4:7], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: insertelement_v_v16i16_v_v:
@@ -4604,11 +4604,7 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v0
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v0
 ; GFX8-NEXT:    v_not_b32_e32 v1, v1
-; GFX8-NEXT:    v_mov_b32_e32 v12, 0
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v0
-; GFX8-NEXT:    v_mov_b32_e32 v13, 0
-; GFX8-NEXT:    v_mov_b32_e32 v14, 16
-; GFX8-NEXT:    v_mov_b32_e32 v15, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e64 v3, v3, v6, s[0:1]
@@ -4619,17 +4615,21 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX8-NEXT:    v_cndmask_b32_e64 v3, v3, v10, s[8:9]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[10:11]
 ; GFX8-NEXT:    v_and_b32_e32 v1, v3, v1
-; GFX8-NEXT:    v_or_b32_e32 v16, v1, v2
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v4, v16, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v5, v16, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v6, v16, s[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, v7, v16, s[2:3]
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v8, v16, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v9, v16, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, v10, v16, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v7, v11, v16, s[10:11]
-; GFX8-NEXT:    flat_store_dwordx4 v[12:13], v[0:3]
-; GFX8-NEXT:    flat_store_dwordx4 v[14:15], v[4:7]
+; GFX8-NEXT:    v_or_b32_e32 v12, v1, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v4, v12, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v5, v12, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v8, v12, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v9, v12, s[6:7]
+; GFX8-NEXT:    v_mov_b32_e32 v8, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v6, v12, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v7, v12, s[2:3]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, v10, v12, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, v11, v12, s[10:11]
+; GFX8-NEXT:    v_mov_b32_e32 v9, 0
+; GFX8-NEXT:    v_mov_b32_e32 v10, 16
+; GFX8-NEXT:    v_mov_b32_e32 v11, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
+; GFX8-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
 ; GFX8-NEXT:    s_endpgm
 ;
 ; GFX7-LABEL: insertelement_v_v16i16_v_v:

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll
index 7532341b39ac0..714bb5687ab88 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll
@@ -9,34 +9,43 @@ define amdgpu_kernel void @v_insert_v64i32_37(ptr addrspace(1) %ptr.in, ptr addr
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; GCN-NEXT:    v_lshlrev_b32_e32 v64, 8, v0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx4 v[4:7], v64, s[0:1] offset:144
 ; GCN-NEXT:    global_load_dwordx4 v[0:3], v64, s[0:1]
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v64, s[0:1] offset:16
-; GCN-NEXT:    global_load_dwordx4 v[8:11], v64, s[0:1] offset:32
-; GCN-NEXT:    global_load_dwordx4 v[12:15], v64, s[0:1] offset:48
-; GCN-NEXT:    global_load_dwordx4 v[16:19], v64, s[0:1] offset:64
-; GCN-NEXT:    global_load_dwordx4 v[20:23], v64, s[0:1] offset:80
-; GCN-NEXT:    global_load_dwordx4 v[24:27], v64, s[0:1] offset:96
-; GCN-NEXT:    global_load_dwordx4 v[28:31], v64, s[0:1] offset:112
-; GCN-NEXT:    global_load_dwordx4 v[32:35], v64, s[0:1] offset:128
-; GCN-NEXT:    global_load_dwordx4 v[36:39], v64, s[0:1] offset:144
+; GCN-NEXT:    global_load_dwordx4 v[8:11], v64, s[0:1] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[12:15], v64, s[0:1] offset:32
+; GCN-NEXT:    global_load_dwordx4 v[16:19], v64, s[0:1] offset:48
+; GCN-NEXT:    global_load_dwordx4 v[20:23], v64, s[0:1] offset:64
+; GCN-NEXT:    global_load_dwordx4 v[24:27], v64, s[0:1] offset:80
+; GCN-NEXT:    global_load_dwordx4 v[28:31], v64, s[0:1] offset:96
+; GCN-NEXT:    global_load_dwordx4 v[32:35], v64, s[0:1] offset:112
+; GCN-NEXT:    global_load_dwordx4 v[36:39], v64, s[0:1] offset:128
 ; GCN-NEXT:    global_load_dwordx4 v[40:43], v64, s[0:1] offset:160
 ; GCN-NEXT:    global_load_dwordx4 v[44:47], v64, s[0:1] offset:176
 ; GCN-NEXT:    global_load_dwordx4 v[48:51], v64, s[0:1] offset:192
 ; GCN-NEXT:    global_load_dwordx4 v[52:55], v64, s[0:1] offset:208
 ; GCN-NEXT:    global_load_dwordx4 v[56:59], v64, s[0:1] offset:224
 ; GCN-NEXT:    global_load_dwordx4 v[60:63], v64, s[0:1] offset:240
-; GCN-NEXT:    s_waitcnt vmcnt(6)
-; GCN-NEXT:    v_mov_b32_e32 v37, 0x3e7
+; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    v_mov_b32_e32 v5, 0x3e7
+; GCN-NEXT:    global_store_dwordx4 v64, v[4:7], s[2:3] offset:144
+; GCN-NEXT:    s_waitcnt vmcnt(15)
 ; GCN-NEXT:    global_store_dwordx4 v64, v[0:3], s[2:3]
-; GCN-NEXT:    global_store_dwordx4 v64, v[4:7], s[2:3] offset:16
-; GCN-NEXT:    global_store_dwordx4 v64, v[8:11], s[2:3] offset:32
-; GCN-NEXT:    global_store_dwordx4 v64, v[12:15], s[2:3] offset:48
-; GCN-NEXT:    global_store_dwordx4 v64, v[16:19], s[2:3] offset:64
-; GCN-NEXT:    global_store_dwordx4 v64, v[20:23], s[2:3] offset:80
-; GCN-NEXT:    global_store_dwordx4 v64, v[24:27], s[2:3] offset:96
-; GCN-NEXT:    global_store_dwordx4 v64, v[28:31], s[2:3] offset:112
-; GCN-NEXT:    global_store_dwordx4 v64, v[32:35], s[2:3] offset:128
-; GCN-NEXT:    global_store_dwordx4 v64, v[36:39], s[2:3] offset:144
+; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    global_store_dwordx4 v64, v[8:11], s[2:3] offset:16
+; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    global_store_dwordx4 v64, v[12:15], s[2:3] offset:32
+; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    global_store_dwordx4 v64, v[16:19], s[2:3] offset:48
+; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    global_store_dwordx4 v64, v[20:23], s[2:3] offset:64
+; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    global_store_dwordx4 v64, v[24:27], s[2:3] offset:80
+; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    global_store_dwordx4 v64, v[28:31], s[2:3] offset:96
+; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    global_store_dwordx4 v64, v[32:35], s[2:3] offset:112
+; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    global_store_dwordx4 v64, v[36:39], s[2:3] offset:128
 ; GCN-NEXT:    s_waitcnt vmcnt(15)
 ; GCN-NEXT:    global_store_dwordx4 v64, v[40:43], s[2:3] offset:160
 ; GCN-NEXT:    s_waitcnt vmcnt(15)
@@ -58,45 +67,39 @@ define amdgpu_kernel void @v_insert_v64i32_37(ptr addrspace(1) %ptr.in, ptr addr
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0xf
 ; GFX10-NEXT:    global_load_dwordx4 v[0:3], v64, s[0:1]
-; GFX10-NEXT:    global_load_dwordx4 v[4:7], v64, s[0:1] offset:16
-; GFX10-NEXT:    global_load_dwordx4 v[8:11], v64, s[0:1] offset:32
-; GFX10-NEXT:    global_load_dwordx4 v[12:15], v64, s[0:1] offset:48
-; GFX10-NEXT:    global_load_dwordx4 v[16:19], v64, s[0:1] offset:64
-; GFX10-NEXT:    global_load_dwordx4 v[20:23], v64, s[0:1] offset:80
-; GFX10-NEXT:    global_load_dwordx4 v[24:27], v64, s[0:1] offset:96
-; GFX10-NEXT:    global_load_dwordx4 v[28:31], v64, s[0:1] offset:112
-; GFX10-NEXT:    global_load_dwordx4 v[32:35], v64, s[0:1] offset:128
-; GFX10-NEXT:    global_load_dwordx4 v[36:39], v64, s[0:1] offset:144
-; GFX10-NEXT:    global_load_dwordx4 v[40:43], v64, s[0:1] offset:160
-; GFX10-NEXT:    global_load_dwordx4 v[44:47], v64, s[0:1] offset:176
-; GFX10-NEXT:    global_load_dwordx4 v[48:51], v64, s[0:1] offset:192
-; GFX10-NEXT:    global_load_dwordx4 v[52:55], v64, s[0:1] offset:208
-; GFX10-NEXT:    global_load_dwordx4 v[56:59], v64, s[0:1] offset:224
-; GFX10-NEXT:    global_load_dwordx4 v[60:63], v64, s[0:1] offset:240
-; GFX10-NEXT:    s_waitcnt vmcnt(6)
-; GFX10-NEXT:    v_mov_b32_e32 v37, 0x3e7
-; GFX10-NEXT:    global_store_dwordx4 v64, v[0:3], s[2:3]
-; GFX10-NEXT:    global_store_dwordx4 v64, v[4:7], s[2:3] offset:16
-; GFX10-NEXT:    global_store_dwordx4 v64, v[8:11], s[2:3] offset:32
-; GFX10-NEXT:    global_store_dwordx4 v64, v[12:15], s[2:3] offset:48
-; GFX10-NEXT:    global_store_dwordx4 v64, v[16:19], s[2:3] offset:64
-; GFX10-NEXT:    global_store_dwordx4 v64, v[20:23], s[2:3] offset:80
-; GFX10-NEXT:    global_store_dwordx4 v64, v[24:27], s[2:3] offset:96
-; GFX10-NEXT:    global_store_dwordx4 v64, v[28:31], s[2:3] offset:112
-; GFX10-NEXT:    global_store_dwordx4 v64, v[32:35], s[2:3] offset:128
-; GFX10-NEXT:    global_store_dwordx4 v64, v[36:39], s[2:3] offset:144
-; GFX10-NEXT:    s_waitcnt vmcnt(5)
-; GFX10-NEXT:    global_store_dwordx4 v64, v[40:43], s[2:3] offset:160
-; GFX10-NEXT:    s_waitcnt vmcnt(4)
-; GFX10-NEXT:    global_store_dwordx4 v64, v[44:47], s[2:3] offset:176
-; GFX10-NEXT:    s_waitcnt vmcnt(3)
-; GFX10-NEXT:    global_store_dwordx4 v64, v[48:51], s[2:3] offset:192
-; GFX10-NEXT:    s_waitcnt vmcnt(2)
-; GFX10-NEXT:    global_store_dwordx4 v64, v[52:55], s[2:3] offset:208
-; GFX10-NEXT:    s_waitcnt vmcnt(1)
-; GFX10-NEXT:    global_store_dwordx4 v64, v[56:59], s[2:3] offset:224
+; GFX10-NEXT:    global_load_dwordx4 v[8:11], v64, s[0:1] offset:16
+; GFX10-NEXT:    global_load_dwordx4 v[12:15], v64, s[0:1] offset:32
+; GFX10-NEXT:    global_load_dwordx4 v[16:19], v64, s[0:1] offset:48
+; GFX10-NEXT:    global_load_dwordx4 v[20:23], v64, s[0:1] offset:64
+; GFX10-NEXT:    global_load_dwordx4 v[24:27], v64, s[0:1] offset:80
+; GFX10-NEXT:    global_load_dwordx4 v[28:31], v64, s[0:1] offset:96
+; GFX10-NEXT:    global_load_dwordx4 v[32:35], v64, s[0:1] offset:112
+; GFX10-NEXT:    global_load_dwordx4 v[36:39], v64, s[0:1] offset:160
+; GFX10-NEXT:    global_load_dwordx4 v[40:43], v64, s[0:1] offset:176
+; GFX10-NEXT:    global_load_dwordx4 v[44:47], v64, s[0:1] offset:192
+; GFX10-NEXT:    global_load_dwordx4 v[48:51], v64, s[0:1] offset:208
+; GFX10-NEXT:    global_load_dwordx4 v[52:55], v64, s[0:1] offset:224
+; GFX10-NEXT:    global_load_dwordx4 v[56:59], v64, s[0:1] offset:240
+; GFX10-NEXT:    global_load_dwordx4 v[60:63], v64, s[0:1] offset:128
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v64, s[0:1] offset:144
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    global_store_dwordx4 v64, v[60:63], s[2:3] offset:240
+; GFX10-NEXT:    v_mov_b32_e32 v5, 0x3e7
+; GFX10-NEXT:    global_store_dwordx4 v64, v[0:3], s[2:3]
+; GFX10-NEXT:    global_store_dwordx4 v64, v[8:11], s[2:3] offset:16
+; GFX10-NEXT:    global_store_dwordx4 v64, v[12:15], s[2:3] offset:32
+; GFX10-NEXT:    global_store_dwordx4 v64, v[16:19], s[2:3] offset:48
+; GFX10-NEXT:    global_store_dwordx4 v64, v[20:23], s[2:3] offset:64
+; GFX10-NEXT:    global_store_dwordx4 v64, v[24:27], s[2:3] offset:80
+; GFX10-NEXT:    global_store_dwordx4 v64, v[28:31], s[2:3] offset:96
+; GFX10-NEXT:    global_store_dwordx4 v64, v[32:35], s[2:3] offset:112
+; GFX10-NEXT:    global_store_dwordx4 v64, v[60:63], s[2:3] offset:128
+; GFX10-NEXT:    global_store_dwordx4 v64, v[4:7], s[2:3] offset:144
+; GFX10-NEXT:    global_store_dwordx4 v64, v[36:39], s[2:3] offset:160
+; GFX10-NEXT:    global_store_dwordx4 v64, v[40:43], s[2:3] offset:176
+; GFX10-NEXT:    global_store_dwordx4 v64, v[44:47], s[2:3] offset:192
+; GFX10-NEXT:    global_store_dwordx4 v64, v[48:51], s[2:3] offset:208
+; GFX10-NEXT:    global_store_dwordx4 v64, v[52:55], s[2:3] offset:224
+; GFX10-NEXT:    global_store_dwordx4 v64, v[56:59], s[2:3] offset:240
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: v_insert_v64i32_37:
@@ -106,15 +109,15 @@ define amdgpu_kernel void @v_insert_v64i32_37(ptr addrspace(1) %ptr.in, ptr addr
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_clause 0xf
 ; GFX11-NEXT:    global_load_b128 v[0:3], v64, s[0:1]
-; GFX11-NEXT:    global_load_b128 v[4:7], v64, s[0:1] offset:16
-; GFX11-NEXT:    global_load_b128 v[8:11], v64, s[0:1] offset:32
-; GFX11-NEXT:    global_load_b128 v[12:15], v64, s[0:1] offset:48
-; GFX11-NEXT:    global_load_b128 v[16:19], v64, s[0:1] offset:64
-; GFX11-NEXT:    global_load_b128 v[20:23], v64, s[0:1] offset:80
-; GFX11-NEXT:    global_load_b128 v[24:27], v64, s[0:1] offset:96
-; GFX11-NEXT:    global_load_b128 v[28:31], v64, s[0:1] offset:112
-; GFX11-NEXT:    global_load_b128 v[32:35], v64, s[0:1] offset:128
-; GFX11-NEXT:    global_load_b128 v[36:39], v64, s[0:1] offset:144
+; GFX11-NEXT:    global_load_b128 v[8:11], v64, s[0:1] offset:16
+; GFX11-NEXT:    global_load_b128 v[12:15], v64, s[0:1] offset:32
+; GFX11-NEXT:    global_load_b128 v[16:19], v64, s[0:1] offset:48
+; GFX11-NEXT:    global_load_b128 v[20:23], v64, s[0:1] offset:64
+; GFX11-NEXT:    global_load_b128 v[24:27], v64, s[0:1] offset:80
+; GFX11-NEXT:    global_load_b128 v[28:31], v64, s[0:1] offset:96
+; GFX11-NEXT:    global_load_b128 v[32:35], v64, s[0:1] offset:112
+; GFX11-NEXT:    global_load_b128 v[36:39], v64, s[0:1] offset:128
+; GFX11-NEXT:    global_load_b128 v[4:7], v64, s[0:1] offset:144
 ; GFX11-NEXT:    global_load_b128 v[40:43], v64, s[0:1] offset:160
 ; GFX11-NEXT:    global_load_b128 v[44:47], v64, s[0:1] offset:176
 ; GFX11-NEXT:    global_load_b128 v[48:51], v64, s[0:1] offset:192
@@ -122,18 +125,18 @@ define amdgpu_kernel void @v_insert_v64i32_37(ptr addrspace(1) %ptr.in, ptr addr
 ; GFX11-NEXT:    global_load_b128 v[56:59], v64, s[0:1] offset:224
 ; GFX11-NEXT:    global_load_b128 v[60:63], v64, s[0:1] offset:240
 ; GFX11-NEXT:    s_waitcnt vmcnt(6)
-; GFX11-NEXT:    v_mov_b32_e32 v37, 0x3e7
+; GFX11-NEXT:    v_mov_b32_e32 v5, 0x3e7
 ; GFX11-NEXT:    s_clause 0x9
 ; GFX11-NEXT:    global_store_b128 v64, v[0:3], s[2:3]
-; GFX11-NEXT:    global_store_b128 v64, v[4:7], s[2:3] offset:16
-; GFX11-NEXT:    global_store_b128 v64, v[8:11], s[2:3] offset:32
-; GFX11-NEXT:    global_store_b128 v64, v[12:15], s[2:3] offset:48
-; GFX11-NEXT:    global_store_b128 v64, v[16:19], s[2:3] offset:64
-; GFX11-NEXT:    global_store_b128 v64, v[20:23], s[2:3] offset:80
-; GFX11-NEXT:    global_store_b128 v64, v[24:27], s[2:3] offset:96
-; GFX11-NEXT:    global_store_b128 v64, v[28:31], s[2:3] offset:112
-; GFX11-NEXT:    global_store_b128 v64, v[32:35], s[2:3] offset:128
-; GFX11-NEXT:    global_store_b128 v64, v[36:39], s[2:3] offset:144
+; GFX11-NEXT:    global_store_b128 v64, v[8:11], s[2:3] offset:16
+; GFX11-NEXT:    global_store_b128 v64, v[12:15], s[2:3] offset:32
+; GFX11-NEXT:    global_store_b128 v64, v[16:19], s[2:3] offset:48
+; GFX11-NEXT:    global_store_b128 v64, v[20:23], s[2:3] offset:64
+; GFX11-NEXT:    global_store_b128 v64, v[24:27], s[2:3] offset:80
+; GFX11-NEXT:    global_store_b128 v64, v[28:31], s[2:3] offset:96
+; GFX11-NEXT:    global_store_b128 v64, v[32:35], s[2:3] offset:112
+; GFX11-NEXT:    global_store_b128 v64, v[36:39], s[2:3] offset:128
+; GFX11-NEXT:    global_store_b128 v64, v[4:7], s[2:3] offset:144
 ; GFX11-NEXT:    s_waitcnt vmcnt(5)
 ; GFX11-NEXT:    global_store_b128 v64, v[40:43], s[2:3] offset:160
 ; GFX11-NEXT:    s_waitcnt vmcnt(4)

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
index 575d52505b85b..7d5c1fade0b3b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
@@ -717,27 +717,27 @@ define void @dyn_insertelement_v8f64_const_s_v_v(double %val, i32 %idx) {
 ; GPRIDX-NEXT:    v_mov_b32_e32 v16, s17
 ; GPRIDX-NEXT:    v_mov_b32_e32 v17, s18
 ; GPRIDX-NEXT:    v_mov_b32_e32 v18, s19
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v2
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s[4:5]
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 2, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 3, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[16:17], 0, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 2, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 3, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 4, v2
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 5, v2
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[12:13], 6, v2
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[14:15], 7, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[16:17], 4, v2
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v0, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v0, s[16:17]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v0, s[16:17]
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v0, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v1, s[16:17]
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v1, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v0, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v0, s[8:9]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s[10:11]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v15, v15, v0, s[12:13]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s[14:15]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s[16:17]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s[8:9]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s[10:11]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v16, v16, v1, s[12:13]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s[14:15]
@@ -770,7 +770,6 @@ define void @dyn_insertelement_v8f64_const_s_v_v(double %val, i32 %idx) {
 ; GFX10-NEXT:    s_mov_b64 s[6:7], 2.0
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s4
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s5
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s6
 ; GFX10-NEXT:    v_mov_b32_e32 v6, s7
 ; GFX10-NEXT:    v_mov_b32_e32 v7, s8
@@ -785,29 +784,30 @@ define void @dyn_insertelement_v8f64_const_s_v_v(double %val, i32 %idx) {
 ; GFX10-NEXT:    v_mov_b32_e32 v16, s17
 ; GFX10-NEXT:    v_mov_b32_e32 v17, s18
 ; GFX10-NEXT:    v_mov_b32_e32 v18, s19
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 1, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 2, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 3, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s7, 5, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s10, 4, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s8, 6, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s9, 7, v2
 ; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 7, v2
 ; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s4
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 3, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v2
-; GFX10-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s4
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 5, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v11, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v2
-; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s4
-; GFX10-NEXT:    v_cndmask_b32_e32 v15, v15, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v0, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v0, s10
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s7
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s10
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s7
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v0, s8
+; GFX10-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s9
+; GFX10-NEXT:    v_cndmask_b32_e64 v16, v16, v1, s8
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s9
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[3:6], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[7:10], off
@@ -994,26 +994,26 @@ define amdgpu_ps void @dyn_insertelement_v8f64_s_s_v(<8 x double> inreg %vec, do
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s18, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s19, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 2, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s18, s0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, s19, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, s18, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s19, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, s18, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s18, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, s19, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, s18, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, s19, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v16, v16, s19, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, s18, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, s19, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, s18, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, s19, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 4, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 5, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s3, 6, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 7, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, s18, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s19, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, s19, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, s18, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, s19, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, s18, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, s19, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, s18, s3
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, s19, s3
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, s18, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v16, v16, s19, s4
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[1:4], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[5:8], off
@@ -1395,9 +1395,6 @@ define amdgpu_ps void @dyn_insertelement_v8f64_s_v_v(<8 x double> inreg %vec, do
 ; GFX10-NEXT:    s_mov_b32 s12, s14
 ; GFX10-NEXT:    s_mov_b32 s14, s16
 ; GFX10-NEXT:    v_mov_b32_e32 v18, s15
-; GFX10-NEXT:    v_mov_b32_e32 v4, s1
-; GFX10-NEXT:    v_mov_b32_e32 v3, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT:    v_mov_b32_e32 v17, s14
 ; GFX10-NEXT:    v_mov_b32_e32 v16, s13
 ; GFX10-NEXT:    v_mov_b32_e32 v15, s12
@@ -1411,29 +1408,32 @@ define amdgpu_ps void @dyn_insertelement_v8f64_s_v_v(<8 x double> inreg %vec, do
 ; GFX10-NEXT:    v_mov_b32_e32 v7, s4
 ; GFX10-NEXT:    v_mov_b32_e32 v6, s3
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s2
+; GFX10-NEXT:    v_mov_b32_e32 v4, s1
+; GFX10-NEXT:    v_mov_b32_e32 v3, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 3, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 2, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 4, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s3, 5, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 6, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 7, v2
 ; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v2
 ; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s0
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v2
-; GFX10-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v11, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v2
-; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v15, v15, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v0, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v0, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s3
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s3
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v0, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v16, v16, v1, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s5
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[3:6], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[7:10], off
@@ -1517,31 +1517,31 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_s_v(<8 x double> %vec, double i
 ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_s_v:
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_mov_b32_e32 v17, s2
-; GPRIDX-NEXT:    v_mov_b32_e32 v18, s3
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v16
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v16
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[14:15], 2, v16
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 3, v16
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 4, v16
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 5, v16
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v16
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[12:13], 6, v16
+; GPRIDX-NEXT:    v_mov_b32_e32 v16, s3
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v18, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v16
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v18, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v16
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v18, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v16
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v18, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v16
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v18, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v16
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v10, v10, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v11, v11, v18, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v16
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v12, v12, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v13, v13, v18, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v16
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v14, v14, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v15, v15, v18, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v17, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v16, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v17, s[14:15]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v17, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v17, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v17, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v17, s[12:13]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v14, v14, v17, s[10:11]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v16, s[14:15]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v16, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v16, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v16, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v13, v16, s[12:13]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v15, v15, v16, s[10:11]
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0)
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -1555,29 +1555,29 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_s_v(<8 x double> %vec, double i
 ; GFX10-LABEL: dyn_insertelement_v8f64_v_s_v:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v16
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v16
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v16
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s3, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s3, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s2, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s3, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, s2, s1
 ; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, s2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, s3, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s3, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s2, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s3, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, s3, s1
 ; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, s2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, s3, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, s2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, s3, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, s2, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, s3, s0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, s2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, s3, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, s2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, s3, vcc_lo
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -1591,29 +1591,29 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_s_v(<8 x double> %vec, double i
 ; GFX11-LABEL: dyn_insertelement_v8f64_v_s_v:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v16
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 7, v16
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s3, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, s3, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s2, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, s3, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, s2, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, s3, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, s3, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s2, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, s3, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, s3, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, s3, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, s2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, s3, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, s2, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, s3, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, s3, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, s2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, s3, vcc_lo
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[0:3], off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[4:7], off dlc
@@ -1702,29 +1702,29 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v(<8 x double> %vec, double %
 ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_v_v:
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v18
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s[0:1]
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[2:3], 2, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 3, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 4, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 5, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[12:13], 6, v18
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v10, v10, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v11, v11, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v12, v12, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v13, v13, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v14, v14, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v15, v15, v17, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s[2:3]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s[12:13]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s[10:11]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s[2:3]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s[12:13]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s[10:11]
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0)
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -1739,28 +1739,28 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v(<8 x double> %vec, double %
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v18
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 2, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 3, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s3, 4, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 5, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 7, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 6, v18
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v18
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s1
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v13, v13, v17, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s3
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s3
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s5
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -1775,24 +1775,27 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v(<8 x double> %vec, double %
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v18
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 7, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 2, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 3, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s3, 4, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s4, 5, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s5, 7, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s6, 6, v18
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v16 :: v_dual_cndmask_b32 v1, v1, v17
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v18
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v18
-; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s1
-; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v16 :: v_dual_cndmask_b32 v5, v5, v17
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v18
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v18
-; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
-; GFX11-NEXT:    v_dual_cndmask_b32 v8, v8, v16 :: v_dual_cndmask_b32 v9, v9, v17
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v18
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s0
-; GFX11-NEXT:    v_dual_cndmask_b32 v12, v12, v16 :: v_dual_cndmask_b32 v13, v13, v17
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s3
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s4
+; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s3
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s4
+; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s6
+; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s5
+; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s6
+; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s5
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[0:3], off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[4:7], off dlc
@@ -2429,29 +2432,29 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v_add_1(<8 x double> %vec, do
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_add_u32_e32 v18, 1, v18
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v18
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s[0:1]
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[2:3], 2, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 3, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 4, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 5, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v18
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[12:13], 6, v18
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v10, v10, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v11, v11, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v12, v12, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v13, v13, v17, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v18
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v14, v14, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v15, v15, v17, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s[2:3]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s[12:13]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s[10:11]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s[2:3]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s[12:13]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s[10:11]
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0)
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -2467,28 +2470,28 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v_add_1(<8 x double> %vec, do
 ; GFX10-NEXT:    v_add_nc_u32_e32 v18, 1, v18
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v18
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 2, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 3, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s3, 4, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 5, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 7, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 6, v18
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v18
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s1
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v18
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s0
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v13, v13, v17, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s2
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s3
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s3
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s6
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s5
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -2505,26 +2508,27 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v_add_1(<8 x double> %vec, do
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v18
 ; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 2, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 3, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s3, 4, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s4, 5, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s5, 7, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s6, 6, v18
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 7, v18
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s0
-; GFX11-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v18
-; GFX11-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v18
-; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s0
-; GFX11-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v18
-; GFX11-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v18
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s0
-; GFX11-NEXT:    v_dual_cndmask_b32 v13, v13, v17 :: v_dual_cndmask_b32 v12, v12, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s3
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s4
+; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s3
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s4
+; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s6
+; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s5
+; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s6
+; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s5
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[0:3], off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[4:7], off dlc
@@ -5666,6 +5670,10 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GPRIDX-NEXT:    s_mov_b32 s12, s14
 ; GPRIDX-NEXT:    s_mov_b32 s13, s15
 ; GPRIDX-NEXT:    v_mov_b32_e32 v18, s15
+; GPRIDX-NEXT:    v_mov_b32_e32 v3, s0
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GPRIDX-NEXT:    v_mov_b32_e32 v4, s1
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc
 ; GPRIDX-NEXT:    v_mov_b32_e32 v17, s14
 ; GPRIDX-NEXT:    v_mov_b32_e32 v16, s13
 ; GPRIDX-NEXT:    v_mov_b32_e32 v15, s12
@@ -5679,43 +5687,39 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GPRIDX-NEXT:    v_mov_b32_e32 v7, s4
 ; GPRIDX-NEXT:    v_mov_b32_e32 v6, s3
 ; GPRIDX-NEXT:    v_mov_b32_e32 v5, s2
-; GPRIDX-NEXT:    v_mov_b32_e32 v4, s1
-; GPRIDX-NEXT:    v_mov_b32_e32 v3, s0
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 2, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[2:3], 3, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 4, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 5, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 1, v2
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v5, v0, s[10:11]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v7, v0, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v9, v0, s[2:3]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v11, v0, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v13, v0, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v0, v15, v0, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s[10:11]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s[2:3]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v14, v1, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v1, v16, v1, s[8:9]
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v3
-; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v4
-; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v2
-; GPRIDX-NEXT:    v_readfirstlane_b32 s3, v6
-; GPRIDX-NEXT:    v_readfirstlane_b32 s4, v5
-; GPRIDX-NEXT:    v_readfirstlane_b32 s5, v8
-; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v7
-; GPRIDX-NEXT:    v_readfirstlane_b32 s7, v10
-; GPRIDX-NEXT:    v_readfirstlane_b32 s8, v9
-; GPRIDX-NEXT:    v_readfirstlane_b32 s9, v12
-; GPRIDX-NEXT:    v_readfirstlane_b32 s10, v11
-; GPRIDX-NEXT:    v_readfirstlane_b32 s11, v13
-; GPRIDX-NEXT:    v_readfirstlane_b32 s12, v0
-; GPRIDX-NEXT:    v_readfirstlane_b32 s13, v1
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v4, v1, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
+; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v3
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v5, v0, vcc
+; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v3
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v6, v1, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v2
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v7, v0, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v8, v1, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v2
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v9, v0, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v10, v1, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v2
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v11, v0, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v12, v1, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v2
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v10, v13, v0, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v11, v14, v1, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v2
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v12, v15, v0, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v13, v16, v1, vcc
+; GPRIDX-NEXT:    v_readfirstlane_b32 s3, v3
+; GPRIDX-NEXT:    v_readfirstlane_b32 s4, v4
+; GPRIDX-NEXT:    v_readfirstlane_b32 s5, v5
+; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v6
+; GPRIDX-NEXT:    v_readfirstlane_b32 s7, v7
+; GPRIDX-NEXT:    v_readfirstlane_b32 s8, v8
+; GPRIDX-NEXT:    v_readfirstlane_b32 s9, v9
+; GPRIDX-NEXT:    v_readfirstlane_b32 s10, v10
+; GPRIDX-NEXT:    v_readfirstlane_b32 s11, v11
+; GPRIDX-NEXT:    v_readfirstlane_b32 s12, v12
+; GPRIDX-NEXT:    v_readfirstlane_b32 s13, v13
 ; GPRIDX-NEXT:    ; return to shader part epilog
 ;
 ; GFX10-LABEL: dyn_insertelement_v7f64_s_v_v:
@@ -5735,9 +5739,13 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GFX10-NEXT:    s_mov_b32 s12, s14
 ; GFX10-NEXT:    s_mov_b32 s13, s15
 ; GFX10-NEXT:    v_mov_b32_e32 v18, s15
+; GFX10-NEXT:    v_mov_b32_e32 v3, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX10-NEXT:    v_mov_b32_e32 v4, s1
 ; GFX10-NEXT:    v_mov_b32_e32 v17, s14
 ; GFX10-NEXT:    v_mov_b32_e32 v16, s13
 ; GFX10-NEXT:    v_mov_b32_e32 v15, s12
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc_lo
 ; GFX10-NEXT:    v_mov_b32_e32 v14, s11
 ; GFX10-NEXT:    v_mov_b32_e32 v13, s10
 ; GFX10-NEXT:    v_mov_b32_e32 v12, s9
@@ -5748,43 +5756,39 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GFX10-NEXT:    v_mov_b32_e32 v7, s4
 ; GFX10-NEXT:    v_mov_b32_e32 v6, s3
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s2
-; GFX10-NEXT:    v_mov_b32_e32 v4, s1
-; GFX10-NEXT:    v_mov_b32_e32 v3, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 6, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc_lo
+; GFX10-NEXT:    v_readfirstlane_b32 s0, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v4, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v5, v0, vcc_lo
+; GFX10-NEXT:    v_readfirstlane_b32 s2, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v6, v1, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v2
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v1, vcc_lo
+; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v7, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v8, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v2
+; GFX10-NEXT:    v_readfirstlane_b32 s4, v4
+; GFX10-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v6, v9, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, v10, v1, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v2
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s2, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v11, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v12, v1, vcc_lo
-; GFX10-NEXT:    v_readfirstlane_b32 s3, v6
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, v13, v0, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v14, v1, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v15, v0, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v16, v1, s1
-; GFX10-NEXT:    v_readfirstlane_b32 s0, v3
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v4
-; GFX10-NEXT:    v_readfirstlane_b32 s4, v7
-; GFX10-NEXT:    v_readfirstlane_b32 s5, v8
-; GFX10-NEXT:    v_readfirstlane_b32 s6, v9
-; GFX10-NEXT:    v_readfirstlane_b32 s7, v10
-; GFX10-NEXT:    v_readfirstlane_b32 s8, v11
-; GFX10-NEXT:    v_readfirstlane_b32 s9, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s10, v12
-; GFX10-NEXT:    v_readfirstlane_b32 s11, v13
-; GFX10-NEXT:    v_readfirstlane_b32 s12, v0
-; GFX10-NEXT:    v_readfirstlane_b32 s13, v1
+; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
+; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v11, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v12, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v2
+; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
+; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
+; GFX10-NEXT:    v_cndmask_b32_e32 v10, v13, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v14, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v2
+; GFX10-NEXT:    v_readfirstlane_b32 s10, v10
+; GFX10-NEXT:    v_readfirstlane_b32 s11, v11
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v15, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v13, v16, v1, vcc_lo
+; GFX10-NEXT:    v_readfirstlane_b32 s12, v12
+; GFX10-NEXT:    v_readfirstlane_b32 s13, v13
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_insertelement_v7f64_s_v_v:
@@ -5804,45 +5808,45 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GFX11-NEXT:    s_mov_b32 s12, s14
 ; GFX11-NEXT:    s_mov_b32 s13, s15
 ; GFX11-NEXT:    v_dual_mov_b32 v18, s15 :: v_dual_mov_b32 v17, s14
+; GFX11-NEXT:    v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v3, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX11-NEXT:    v_dual_mov_b32 v16, s13 :: v_dual_mov_b32 v15, s12
 ; GFX11-NEXT:    v_dual_mov_b32 v14, s11 :: v_dual_mov_b32 v13, s10
 ; GFX11-NEXT:    v_dual_mov_b32 v12, s9 :: v_dual_mov_b32 v11, s8
 ; GFX11-NEXT:    v_dual_mov_b32 v10, s7 :: v_dual_mov_b32 v9, s6
 ; GFX11-NEXT:    v_dual_mov_b32 v8, s5 :: v_dual_mov_b32 v7, s4
 ; GFX11-NEXT:    v_dual_mov_b32 v6, s3 :: v_dual_mov_b32 v5, s2
-; GFX11-NEXT:    v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v3, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v2
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 6, v2
-; GFX11-NEXT:    v_dual_cndmask_b32 v3, v3, v0 :: v_dual_cndmask_b32 v4, v4, v1
+; GFX11-NEXT:    v_dual_cndmask_b32 v18, v3, v0 :: v_dual_cndmask_b32 v17, v4, v1
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v2
-; GFX11-NEXT:    v_dual_cndmask_b32 v7, v7, v0 :: v_dual_cndmask_b32 v8, v8, v1
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v2
-; GFX11-NEXT:    v_readfirstlane_b32 s2, v5
-; GFX11-NEXT:    v_dual_cndmask_b32 v11, v11, v0 :: v_dual_cndmask_b32 v2, v12, v1
-; GFX11-NEXT:    v_readfirstlane_b32 s3, v6
-; GFX11-NEXT:    v_cndmask_b32_e64 v12, v13, v0, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v13, v14, v1, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v15, v0, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v16, v1, s1
-; GFX11-NEXT:    v_readfirstlane_b32 s0, v3
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v4
-; GFX11-NEXT:    v_readfirstlane_b32 s4, v7
-; GFX11-NEXT:    v_readfirstlane_b32 s5, v8
-; GFX11-NEXT:    v_readfirstlane_b32 s6, v9
-; GFX11-NEXT:    v_readfirstlane_b32 s7, v10
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s6, 4, v2
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v5, v0, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v6, v1, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s9, 6, v2
+; GFX11-NEXT:    v_dual_cndmask_b32 v6, v7, v0 :: v_dual_cndmask_b32 v5, v8, v1
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v2
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v0, s6
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v18
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v17
+; GFX11-NEXT:    v_readfirstlane_b32 s2, v3
+; GFX11-NEXT:    v_dual_cndmask_b32 v7, v9, v0 :: v_dual_cndmask_b32 v8, v10, v1
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v2
+; GFX11-NEXT:    v_cndmask_b32_e64 v9, v12, v1, s6
+; GFX11-NEXT:    v_cndmask_b32_e64 v12, v15, v0, s9
+; GFX11-NEXT:    v_readfirstlane_b32 s3, v4
+; GFX11-NEXT:    v_readfirstlane_b32 s4, v6
+; GFX11-NEXT:    v_cndmask_b32_e32 v10, v13, v0, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v14, v14, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v13, v16, v1, s9
+; GFX11-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX11-NEXT:    v_readfirstlane_b32 s6, v7
+; GFX11-NEXT:    v_readfirstlane_b32 s7, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s8, v11
-; GFX11-NEXT:    v_readfirstlane_b32 s9, v2
-; GFX11-NEXT:    v_readfirstlane_b32 s10, v12
-; GFX11-NEXT:    v_readfirstlane_b32 s11, v13
-; GFX11-NEXT:    v_readfirstlane_b32 s12, v0
-; GFX11-NEXT:    v_readfirstlane_b32 s13, v1
+; GFX11-NEXT:    v_readfirstlane_b32 s9, v9
+; GFX11-NEXT:    v_readfirstlane_b32 s10, v10
+; GFX11-NEXT:    v_readfirstlane_b32 s11, v14
+; GFX11-NEXT:    v_readfirstlane_b32 s12, v12
+; GFX11-NEXT:    v_readfirstlane_b32 s13, v13
 ; GFX11-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <7 x double> %vec, double %val, i32 %idx
@@ -5943,38 +5947,38 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_v_v_v(<7 x double> %vec,
 ; GFX10-LABEL: dyn_insertelement_v7f64_v_v_v:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v16
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v16
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 6, v16
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v14, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v15, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v14, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v15, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v14, s1
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v15, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v14, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v15, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v15, s1
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v9, v15, vcc_lo
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v14, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v15, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v15, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v16
 ; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v15, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX10-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v6, v6, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v15, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
 ; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v9, v15, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v16
 ; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
+; GFX10-NEXT:    v_cndmask_b32_e32 v10, v10, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v11, v15, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v16
 ; GFX10-NEXT:    v_readfirstlane_b32 s10, v10
 ; GFX10-NEXT:    v_readfirstlane_b32 s11, v11
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v13, v13, v15, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s12, v12
 ; GFX10-NEXT:    v_readfirstlane_b32 s13, v13
 ; GFX10-NEXT:    ; return to shader part epilog
@@ -5982,37 +5986,35 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_v_v_v(<7 x double> %vec,
 ; GFX11-LABEL: dyn_insertelement_v7f64_v_v_v:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v16
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 6, v16
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s9, 5, v16
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s10, 6, v16
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v14 :: v_dual_cndmask_b32 v1, v1, v15
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v14, s9
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v15, s9
+; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, v14, s10
+; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, v15, s10
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v14 :: v_dual_cndmask_b32 v3, v3, v15
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v14, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v15, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, v14, s1
-; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v14 :: v_dual_cndmask_b32 v5, v5, v15
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v14, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v15, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, v15, s1
-; GFX11-NEXT:    v_dual_cndmask_b32 v8, v8, v14 :: v_dual_cndmask_b32 v9, v9, v15
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v14, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v15, s0
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX11-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v14 :: v_dual_cndmask_b32 v5, v5, v15
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
+; GFX11-NEXT:    v_readfirstlane_b32 s10, v10
+; GFX11-NEXT:    v_readfirstlane_b32 s11, v11
 ; GFX11-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX11-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX11-NEXT:    v_dual_cndmask_b32 v6, v6, v14 :: v_dual_cndmask_b32 v7, v7, v15
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
+; GFX11-NEXT:    v_readfirstlane_b32 s12, v12
+; GFX11-NEXT:    v_readfirstlane_b32 s13, v13
 ; GFX11-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX11-NEXT:    v_readfirstlane_b32 s7, v7
+; GFX11-NEXT:    v_dual_cndmask_b32 v8, v8, v14 :: v_dual_cndmask_b32 v9, v9, v15
 ; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s9, v9
-; GFX11-NEXT:    v_readfirstlane_b32 s10, v10
-; GFX11-NEXT:    v_readfirstlane_b32 s11, v11
-; GFX11-NEXT:    v_readfirstlane_b32 s12, v12
-; GFX11-NEXT:    v_readfirstlane_b32 s13, v13
 ; GFX11-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <7 x double> %vec, double %val, i32 %idx
@@ -6346,20 +6348,20 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_s(<5 x double> %vec,
 ; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_s:
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], s2, 1
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], s2, 2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], s2, 3
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], s2, 4
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s[6:7]
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 1
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 2
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 3
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 4
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v11, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s[6:7]
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v0
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v1
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v2
@@ -6375,56 +6377,55 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_s(<5 x double> %vec,
 ; GFX10-LABEL: dyn_insertelement_v5f64_v_v_s:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, s2, 1
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, s2, 4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, s2, 4
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 1
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s0
+; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 2
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, s2, 3
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s1
+; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
+; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
+; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s0
-; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 3
 ; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX10-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
-; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
-; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_insertelement_v5f64_v_v_s:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, s2, 1
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, s2, 2
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, s2, 4
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v10 :: v_dual_cndmask_b32 v1, v1, v11
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 2
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, s2, 3
+; GFX11-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 1
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s1
-; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v10 :: v_dual_cndmask_b32 v5, v5, v11
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s0
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v10 :: v_dual_cndmask_b32 v3, v3, v11
+; GFX11-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 3
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX11-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX11-NEXT:    v_dual_cndmask_b32 v6, v6, v10 :: v_dual_cndmask_b32 v7, v7, v11
 ; GFX11-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX11-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX11-NEXT:    v_readfirstlane_b32 s7, v7
-; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX11-NEXT:    ; return to shader part epilog
 entry:
@@ -6436,20 +6437,20 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_v(<5 x double> %vec,
 ; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_v:
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v12
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v12
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[2:3], 2, v12
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 3, v12
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 4, v12
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s[2:3]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s[6:7]
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v12
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v12
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v12
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v12
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v11, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s[2:3]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s[6:7]
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v0
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v1
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v2
@@ -6465,56 +6466,55 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_v(<5 x double> %vec,
 ; GFX10-LABEL: dyn_insertelement_v5f64_v_v_v:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v12
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v12
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 4, v12
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 4, v12
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v12
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v12
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s1
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v12
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s0
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v12
+; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
+; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v12
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX10-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
-; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
-; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_insertelement_v5f64_v_v_v:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v12
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 2, v12
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 4, v12
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v10 :: v_dual_cndmask_b32 v1, v1, v11
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v12
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v12
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v12
+; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s1
-; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v10 :: v_dual_cndmask_b32 v5, v5, v11
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s0
+; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v10 :: v_dual_cndmask_b32 v3, v3, v11
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v12
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX11-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX11-NEXT:    v_dual_cndmask_b32 v6, v6, v10 :: v_dual_cndmask_b32 v7, v7, v11
 ; GFX11-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX11-NEXT:    v_readfirstlane_b32 s5, v5
+; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX11-NEXT:    v_readfirstlane_b32 s7, v7
-; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX11-NEXT:    ; return to shader part epilog
 entry:

diff  --git a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
index 739671f8c43ae..e8ceeece372d4 100644
--- a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
@@ -581,42 +581,42 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX908-NEXT:    v_cmp_lt_i64_e64 s[14:15], s[6:7], 0
 ; GFX908-NEXT:    v_cmp_gt_i64_e64 s[16:17], s[6:7], -1
 ; GFX908-NEXT:    v_mov_b32_e32 v11, v5
-; GFX908-NEXT:    s_mov_b64 s[18:19], s[10:11]
+; GFX908-NEXT:    s_mov_b64 s[20:21], s[10:11]
 ; GFX908-NEXT:    v_mov_b32_e32 v10, v4
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
 ; GFX908-NEXT:    v_readfirstlane_b32 s5, v2
 ; GFX908-NEXT:    v_readfirstlane_b32 s9, v3
 ; GFX908-NEXT:    s_add_u32 s5, s5, 1
 ; GFX908-NEXT:    s_addc_u32 s9, s9, 0
-; GFX908-NEXT:    s_mul_hi_u32 s20, s2, s5
-; GFX908-NEXT:    s_mul_i32 s9, s2, s9
-; GFX908-NEXT:    s_mul_i32 s21, s3, s5
-; GFX908-NEXT:    s_add_i32 s9, s20, s9
-; GFX908-NEXT:    s_mul_i32 s5, s2, s5
-; GFX908-NEXT:    s_add_i32 s9, s9, s21
+; GFX908-NEXT:    s_mul_hi_u32 s19, s2, s5
+; GFX908-NEXT:    s_mul_i32 s22, s3, s5
+; GFX908-NEXT:    s_mul_i32 s18, s2, s5
+; GFX908-NEXT:    s_mul_i32 s5, s2, s9
+; GFX908-NEXT:    s_add_i32 s5, s19, s5
+; GFX908-NEXT:    s_add_i32 s5, s5, s22
 ; GFX908-NEXT:    s_branch .LBB3_5
 ; GFX908-NEXT:  .LBB3_4: ; %bb58
 ; GFX908-NEXT:    ; in Loop: Header=BB3_5 Depth=2
 ; GFX908-NEXT:    v_add_co_u32_sdwa v2, vcc, v2, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
 ; GFX908-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
-; GFX908-NEXT:    s_add_u32 s18, s18, s0
-; GFX908-NEXT:    v_cmp_lt_i64_e64 s[22:23], -1, v[2:3]
-; GFX908-NEXT:    s_addc_u32 s19, s19, s1
-; GFX908-NEXT:    s_mov_b64 s[20:21], 0
-; GFX908-NEXT:    s_andn2_b64 vcc, exec, s[22:23]
+; GFX908-NEXT:    s_add_u32 s20, s20, s0
+; GFX908-NEXT:    v_cmp_lt_i64_e64 s[24:25], -1, v[2:3]
+; GFX908-NEXT:    s_addc_u32 s21, s21, s1
+; GFX908-NEXT:    s_mov_b64 s[22:23], 0
+; GFX908-NEXT:    s_andn2_b64 vcc, exec, s[24:25]
 ; GFX908-NEXT:    s_cbranch_vccz .LBB3_9
 ; GFX908-NEXT:  .LBB3_5: ; %bb16
 ; GFX908-NEXT:    ; Parent Loop BB3_2 Depth=1
 ; GFX908-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX908-NEXT:    s_add_u32 s20, s18, s5
-; GFX908-NEXT:    s_addc_u32 s21, s19, s9
-; GFX908-NEXT:    global_load_dword v21, v19, s[20:21] offset:-12 glc
+; GFX908-NEXT:    s_add_u32 s22, s20, s18
+; GFX908-NEXT:    s_addc_u32 s23, s21, s5
+; GFX908-NEXT:    global_load_dword v21, v19, s[22:23] offset:-12 glc
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
-; GFX908-NEXT:    global_load_dword v20, v19, s[20:21] offset:-8 glc
+; GFX908-NEXT:    global_load_dword v20, v19, s[22:23] offset:-8 glc
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
-; GFX908-NEXT:    global_load_dword v12, v19, s[20:21] offset:-4 glc
+; GFX908-NEXT:    global_load_dword v12, v19, s[22:23] offset:-4 glc
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
-; GFX908-NEXT:    global_load_dword v12, v19, s[20:21] glc
+; GFX908-NEXT:    global_load_dword v12, v19, s[22:23] glc
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
 ; GFX908-NEXT:    ds_read_b64 v[12:13], v19
 ; GFX908-NEXT:    ds_read_b64 v[14:15], v0
@@ -645,11 +645,11 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX908-NEXT:    v_add_f32_e32 v7, v7, v15
 ; GFX908-NEXT:    v_add_f32_e32 v10, v10, v12
 ; GFX908-NEXT:    v_add_f32_e32 v11, v11, v13
-; GFX908-NEXT:    s_mov_b64 s[20:21], -1
+; GFX908-NEXT:    s_mov_b64 s[22:23], -1
 ; GFX908-NEXT:    s_branch .LBB3_4
 ; GFX908-NEXT:  .LBB3_7: ; in Loop: Header=BB3_5 Depth=2
-; GFX908-NEXT:    s_mov_b64 s[20:21], s[14:15]
-; GFX908-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
+; GFX908-NEXT:    s_mov_b64 s[22:23], s[14:15]
+; GFX908-NEXT:    s_andn2_b64 vcc, exec, s[22:23]
 ; GFX908-NEXT:    s_cbranch_vccz .LBB3_4
 ; GFX908-NEXT:  ; %bb.8: ; in Loop: Header=BB3_2 Depth=1
 ; GFX908-NEXT:    ; implicit-def: $vgpr10_vgpr11
@@ -657,10 +657,10 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX908-NEXT:    ; implicit-def: $vgpr8_vgpr9
 ; GFX908-NEXT:    ; implicit-def: $vgpr4_vgpr5
 ; GFX908-NEXT:    ; implicit-def: $vgpr2_vgpr3
-; GFX908-NEXT:    ; implicit-def: $sgpr18_sgpr19
+; GFX908-NEXT:    ; implicit-def: $sgpr20_sgpr21
 ; GFX908-NEXT:  .LBB3_9: ; %loop.exit.guard
 ; GFX908-NEXT:    ; in Loop: Header=BB3_2 Depth=1
-; GFX908-NEXT:    s_xor_b64 s[16:17], s[20:21], -1
+; GFX908-NEXT:    s_xor_b64 s[16:17], s[22:23], -1
 ; GFX908-NEXT:  .LBB3_10: ; %Flow19
 ; GFX908-NEXT:    ; in Loop: Header=BB3_2 Depth=1
 ; GFX908-NEXT:    s_mov_b64 s[14:15], -1
@@ -742,47 +742,47 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX90A-NEXT:    v_pk_mov_b32 v[8:9], s[8:9], s[8:9] op_sel:[0,1]
 ; GFX90A-NEXT:    v_cmp_lt_i64_e64 s[14:15], s[6:7], 0
 ; GFX90A-NEXT:    v_cmp_gt_i64_e64 s[16:17], s[6:7], -1
-; GFX90A-NEXT:    s_mov_b64 s[18:19], s[10:11]
+; GFX90A-NEXT:    s_mov_b64 s[20:21], s[10:11]
 ; GFX90A-NEXT:    v_pk_mov_b32 v[12:13], v[6:7], v[6:7] op_sel:[0,1]
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_readfirstlane_b32 s5, v4
 ; GFX90A-NEXT:    v_readfirstlane_b32 s9, v5
 ; GFX90A-NEXT:    s_add_u32 s5, s5, 1
 ; GFX90A-NEXT:    s_addc_u32 s9, s9, 0
-; GFX90A-NEXT:    s_mul_hi_u32 s20, s2, s5
-; GFX90A-NEXT:    s_mul_i32 s9, s2, s9
-; GFX90A-NEXT:    s_mul_i32 s21, s3, s5
-; GFX90A-NEXT:    s_add_i32 s9, s20, s9
-; GFX90A-NEXT:    s_mul_i32 s5, s2, s5
-; GFX90A-NEXT:    s_add_i32 s9, s9, s21
+; GFX90A-NEXT:    s_mul_hi_u32 s19, s2, s5
+; GFX90A-NEXT:    s_mul_i32 s22, s3, s5
+; GFX90A-NEXT:    s_mul_i32 s18, s2, s5
+; GFX90A-NEXT:    s_mul_i32 s5, s2, s9
+; GFX90A-NEXT:    s_add_i32 s5, s19, s5
+; GFX90A-NEXT:    s_add_i32 s5, s5, s22
 ; GFX90A-NEXT:    s_branch .LBB3_5
 ; GFX90A-NEXT:  .LBB3_4: ; %bb58
 ; GFX90A-NEXT:    ; in Loop: Header=BB3_5 Depth=2
 ; GFX90A-NEXT:    v_add_co_u32_sdwa v4, vcc, v4, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
-; GFX90A-NEXT:    s_add_u32 s18, s18, s0
-; GFX90A-NEXT:    s_addc_u32 s19, s19, s1
-; GFX90A-NEXT:    v_cmp_lt_i64_e64 s[22:23], -1, v[4:5]
-; GFX90A-NEXT:    s_mov_b64 s[20:21], 0
-; GFX90A-NEXT:    s_andn2_b64 vcc, exec, s[22:23]
+; GFX90A-NEXT:    s_add_u32 s20, s20, s0
+; GFX90A-NEXT:    s_addc_u32 s21, s21, s1
+; GFX90A-NEXT:    v_cmp_lt_i64_e64 s[24:25], -1, v[4:5]
+; GFX90A-NEXT:    s_mov_b64 s[22:23], 0
+; GFX90A-NEXT:    s_andn2_b64 vcc, exec, s[24:25]
 ; GFX90A-NEXT:    s_cbranch_vccz .LBB3_9
 ; GFX90A-NEXT:  .LBB3_5: ; %bb16
 ; GFX90A-NEXT:    ; Parent Loop BB3_2 Depth=1
 ; GFX90A-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX90A-NEXT:    s_add_u32 s20, s18, s5
-; GFX90A-NEXT:    s_addc_u32 s21, s19, s9
-; GFX90A-NEXT:    global_load_dword v21, v19, s[20:21] offset:-12 glc
+; GFX90A-NEXT:    s_add_u32 s22, s20, s18
+; GFX90A-NEXT:    s_addc_u32 s23, s21, s5
+; GFX90A-NEXT:    global_load_dword v21, v19, s[22:23] offset:-12 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    global_load_dword v20, v19, s[20:21] offset:-8 glc
+; GFX90A-NEXT:    global_load_dword v20, v19, s[22:23] offset:-8 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    global_load_dword v14, v19, s[20:21] offset:-4 glc
+; GFX90A-NEXT:    global_load_dword v14, v19, s[22:23] offset:-4 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    global_load_dword v14, v19, s[20:21] glc
+; GFX90A-NEXT:    global_load_dword v14, v19, s[22:23] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    ds_read_b64 v[14:15], v19
 ; GFX90A-NEXT:    ds_read_b64 v[16:17], v0
 ; GFX90A-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
-; GFX90A-NEXT:    ; kill: killed $sgpr20 killed $sgpr21
+; GFX90A-NEXT:    ; kill: killed $sgpr22 killed $sgpr23
 ; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX90A-NEXT:    s_cbranch_vccnz .LBB3_7
 ; GFX90A-NEXT:  ; %bb.6: ; %bb51
@@ -799,11 +799,11 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX90A-NEXT:    v_pk_add_f32 v[10:11], v[10:11], v[26:27]
 ; GFX90A-NEXT:    v_pk_add_f32 v[8:9], v[8:9], v[16:17]
 ; GFX90A-NEXT:    v_pk_add_f32 v[12:13], v[12:13], v[14:15]
-; GFX90A-NEXT:    s_mov_b64 s[20:21], -1
+; GFX90A-NEXT:    s_mov_b64 s[22:23], -1
 ; GFX90A-NEXT:    s_branch .LBB3_4
 ; GFX90A-NEXT:  .LBB3_7: ; in Loop: Header=BB3_5 Depth=2
-; GFX90A-NEXT:    s_mov_b64 s[20:21], s[14:15]
-; GFX90A-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
+; GFX90A-NEXT:    s_mov_b64 s[22:23], s[14:15]
+; GFX90A-NEXT:    s_andn2_b64 vcc, exec, s[22:23]
 ; GFX90A-NEXT:    s_cbranch_vccz .LBB3_4
 ; GFX90A-NEXT:  ; %bb.8: ; in Loop: Header=BB3_2 Depth=1
 ; GFX90A-NEXT:    ; implicit-def: $vgpr12_vgpr13
@@ -811,10 +811,10 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX90A-NEXT:    ; implicit-def: $vgpr10_vgpr11
 ; GFX90A-NEXT:    ; implicit-def: $vgpr6_vgpr7
 ; GFX90A-NEXT:    ; implicit-def: $vgpr4_vgpr5
-; GFX90A-NEXT:    ; implicit-def: $sgpr18_sgpr19
+; GFX90A-NEXT:    ; implicit-def: $sgpr20_sgpr21
 ; GFX90A-NEXT:  .LBB3_9: ; %loop.exit.guard
 ; GFX90A-NEXT:    ; in Loop: Header=BB3_2 Depth=1
-; GFX90A-NEXT:    s_xor_b64 s[16:17], s[20:21], -1
+; GFX90A-NEXT:    s_xor_b64 s[16:17], s[22:23], -1
 ; GFX90A-NEXT:  .LBB3_10: ; %Flow19
 ; GFX90A-NEXT:    ; in Loop: Header=BB3_2 Depth=1
 ; GFX90A-NEXT:    s_mov_b64 s[14:15], -1

diff  --git a/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir b/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir
index 2e802c75a4435..a480202e166f4 100644
--- a/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir
@@ -18,7 +18,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = COPY $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
@@ -29,7 +29,7 @@ body:             |
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[V_CEIL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CEIL_F32_e32 [[V_CEIL_F32_e32_]], implicit $mode, implicit $exec
+  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %0.sub1, implicit $mode, implicit $exec
   ; CHECK-NEXT:   S_BRANCH %bb.1
   bb.0:
     liveins: $vgpr0
@@ -102,7 +102,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = COPY $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
@@ -113,7 +113,7 @@ body:             |
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[V_MUL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_MUL_F32_e32 0, [[V_MUL_F32_e32_]], implicit $mode, implicit $exec
+  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = nofpexcept V_MUL_F32_e32 0, %0.sub1, implicit $mode, implicit $exec
   ; CHECK-NEXT:   S_BRANCH %bb.1
   bb.0:
     liveins: $vgpr0
@@ -143,7 +143,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = COPY $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
@@ -154,7 +154,7 @@ body:             |
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[V_MUL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_MUL_F32_e32 0, [[V_MUL_F32_e32_]], implicit $mode, implicit $exec
+  ; CHECK-NEXT:   %0.sub1:vreg_64 = nofpexcept V_MUL_F32_e32 0, %0.sub1, implicit $mode, implicit $exec
   ; CHECK-NEXT:   S_BRANCH %bb.1
   bb.0:
     liveins: $vgpr0
@@ -261,7 +261,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = COPY $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
@@ -269,7 +269,7 @@ body:             |
   ; CHECK-NEXT:   S_CBRANCH_EXECNZ %bb.1, implicit $exec
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
-  ; CHECK-NEXT:   S_NOP 0, implicit [[COPY]]
+  ; CHECK-NEXT:   S_NOP 0, implicit %0.sub1
   bb.0:
     liveins: $vgpr0
 
@@ -295,7 +295,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   dead [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   dead undef %2.sub1:vreg_64 = COPY $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
@@ -336,7 +336,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   dead [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   dead undef %0.sub1:vreg_64 = COPY $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
@@ -376,7 +376,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   dead [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   dead undef %0.sub1:vreg_64 = COPY $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
@@ -417,7 +417,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = COPY $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
@@ -428,7 +428,7 @@ body:             |
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[V_CEIL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CEIL_F32_e32 [[V_CEIL_F32_e32_]], implicit $mode, implicit $exec
+  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %0.sub1, implicit $mode, implicit $exec
   ; CHECK-NEXT:   S_BRANCH %bb.1
   bb.0:
     liveins: $vgpr0
@@ -458,7 +458,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = COPY $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
@@ -469,7 +469,7 @@ body:             |
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[V_CEIL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CEIL_F32_e32 [[V_CEIL_F32_e32_]], implicit $mode, implicit $exec
+  ; CHECK-NEXT:   %0.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %0.sub1, implicit $mode, implicit $exec
   ; CHECK-NEXT:   S_BRANCH %bb.1
   bb.0:
     liveins: $vgpr0

diff  --git a/llvm/test/CodeGen/AMDGPU/dead-lane.mir b/llvm/test/CodeGen/AMDGPU/dead-lane.mir
index 5e2e15e596667..a18e647ad485a 100644
--- a/llvm/test/CodeGen/AMDGPU/dead-lane.mir
+++ b/llvm/test/CodeGen/AMDGPU/dead-lane.mir
@@ -1,11 +1,10 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -march=amdgcn -mcpu=tonga %s -start-before detect-dead-lanes -stop-before machine-scheduler -verify-machineinstrs -o - | FileCheck -check-prefix=GCN %s
 # RUN: llc -march=amdgcn -mcpu=tonga %s -start-before detect-dead-lanes -stop-before machine-scheduler -verify-machineinstrs -early-live-intervals -o - | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: dead_lane
 # GCN:      bb.0:
-# GCN-NEXT: %5:vgpr_32 = nofpexcept V_MAC_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, undef %5, implicit $mode, implicit $exec
-# GCN-NEXT: FLAT_STORE_DWORD undef %4:vreg_64, %5,
+# GCN-NEXT: undef %3.sub0:vreg_64 = nofpexcept V_MAC_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, undef %3.sub0, implicit $mode, implicit $exec
+# GCN-NEXT: FLAT_STORE_DWORD undef %4:vreg_64, %3.sub0,
 ---
 name:            dead_lane
 tracksRegLiveness: true

diff  --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 8a5fe071deaef..2699899845b3c 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -348,7 +348,6 @@
 ; GCN-O1-NEXT:        Machine Natural Loop Construction
 ; GCN-O1-NEXT:        Simple Register Coalescing
 ; GCN-O1-NEXT:        Rename Disconnected Subregister Components
-; GCN-O1-NEXT:        Rewrite Partial Register Uses
 ; GCN-O1-NEXT:        Machine Instruction Scheduler
 ; GCN-O1-NEXT:        MachinePostDominator Tree Construction
 ; GCN-O1-NEXT:        SI Whole Quad Mode
@@ -656,7 +655,6 @@
 ; GCN-O1-OPTS-NEXT:        Machine Natural Loop Construction
 ; GCN-O1-OPTS-NEXT:        Simple Register Coalescing
 ; GCN-O1-OPTS-NEXT:        Rename Disconnected Subregister Components
-; GCN-O1-OPTS-NEXT:        Rewrite Partial Register Uses
 ; GCN-O1-OPTS-NEXT:        AMDGPU Pre-RA optimizations
 ; GCN-O1-OPTS-NEXT:        Machine Instruction Scheduler
 ; GCN-O1-OPTS-NEXT:        MachinePostDominator Tree Construction
@@ -966,7 +964,6 @@
 ; GCN-O2-NEXT:        Machine Natural Loop Construction
 ; GCN-O2-NEXT:        Simple Register Coalescing
 ; GCN-O2-NEXT:        Rename Disconnected Subregister Components
-; GCN-O2-NEXT:        Rewrite Partial Register Uses
 ; GCN-O2-NEXT:        AMDGPU Pre-RA optimizations
 ; GCN-O2-NEXT:        Machine Instruction Scheduler
 ; GCN-O2-NEXT:        MachinePostDominator Tree Construction
@@ -1288,7 +1285,6 @@
 ; GCN-O3-NEXT:        Machine Natural Loop Construction
 ; GCN-O3-NEXT:        Simple Register Coalescing
 ; GCN-O3-NEXT:        Rename Disconnected Subregister Components
-; GCN-O3-NEXT:        Rewrite Partial Register Uses
 ; GCN-O3-NEXT:        AMDGPU Pre-RA optimizations
 ; GCN-O3-NEXT:        Machine Instruction Scheduler
 ; GCN-O3-NEXT:        MachinePostDominator Tree Construction

diff  --git a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
index beeed11715298..8d8a525ab9655 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
@@ -184,22 +184,22 @@ define { i64, i1 } @smulo_i64_v_v(i64 %x, i64 %y) {
 ; GFX10-NEXT:    v_mov_b32_e32 v5, v1
 ; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s4, v4, v2, 0
 ; GFX10-NEXT:    v_mad_u64_u32 v[6:7], s4, v4, v3, 0
-; GFX10-NEXT:    v_mad_u64_u32 v[8:9], s4, v5, v2, 0
-; GFX10-NEXT:    v_mad_i64_i32 v[10:11], s4, v5, v3, 0
-; GFX10-NEXT:    v_mov_b32_e32 v12, v1
-; GFX10-NEXT:    v_add3_u32 v1, v1, v6, v8
-; GFX10-NEXT:    v_add_co_u32 v12, vcc_lo, v12, v6
+; GFX10-NEXT:    v_mad_u64_u32 v[9:10], s4, v5, v2, 0
+; GFX10-NEXT:    v_mad_i64_i32 v[11:12], s4, v5, v3, 0
+; GFX10-NEXT:    v_mov_b32_e32 v8, v1
+; GFX10-NEXT:    v_add3_u32 v1, v1, v6, v9
+; GFX10-NEXT:    v_add_co_u32 v8, vcc_lo, v8, v6
 ; GFX10-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v12, vcc_lo, v12, v8
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, v7, v9, vcc_lo
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, 0, v11, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v10
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v8, vcc_lo, v8, v9
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, v7, v10, vcc_lo
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, 0, v12, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v11
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
 ; GFX10-NEXT:    v_sub_co_u32 v2, vcc_lo, v7, v2
-; GFX10-NEXT:    v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v9, vcc_lo
+; GFX10-NEXT:    v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v8, vcc_lo
 ; GFX10-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0, v5
 ; GFX10-NEXT:    v_cndmask_b32_e32 v6, v7, v2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v8, v10, vcc_lo
 ; GFX10-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
 ; GFX10-NEXT:    v_sub_co_u32 v4, vcc_lo, v6, v4
 ; GFX10-NEXT:    v_subrev_co_ci_u32_e32 v7, vcc_lo, 0, v5, vcc_lo
@@ -219,28 +219,28 @@ define { i64, i1 } @smulo_i64_v_v(i64 %x, i64 %y) {
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_mad_u64_u32 v[0:1], null, v4, v2, 0
 ; GFX11-NEXT:    v_mad_u64_u32 v[6:7], null, v4, v3, 0
-; GFX11-NEXT:    v_mad_u64_u32 v[8:9], null, v5, v2, 0
-; GFX11-NEXT:    v_mad_i64_i32 v[10:11], null, v5, v3, 0
+; GFX11-NEXT:    v_mad_u64_u32 v[9:10], null, v5, v2, 0
+; GFX11-NEXT:    v_mad_i64_i32 v[11:12], null, v5, v3, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_mov_b32_e32 v12, v1
-; GFX11-NEXT:    v_add3_u32 v1, v1, v6, v8
+; GFX11-NEXT:    v_mov_b32_e32 v8, v1
+; GFX11-NEXT:    v_add3_u32 v1, v1, v6, v9
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_u32 v12, vcc_lo, v12, v6
+; GFX11-NEXT:    v_add_co_u32 v8, vcc_lo, v8, v6
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v12, vcc_lo, v12, v8
+; GFX11-NEXT:    v_add_co_u32 v8, vcc_lo, v8, v9
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, v7, v9, vcc_lo
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, 0, v11, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v10
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, v7, v10, vcc_lo
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, 0, v12, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v11
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
 ; GFX11-NEXT:    v_sub_co_u32 v2, vcc_lo, v7, v2
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v9, vcc_lo
+; GFX11-NEXT:    v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v8, vcc_lo
 ; GFX11-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0, v5
 ; GFX11-NEXT:    v_cndmask_b32_e32 v6, v7, v2, vcc_lo
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v5, v8, v10, vcc_lo
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
 ; GFX11-NEXT:    v_sub_co_u32 v4, vcc_lo, v6, v4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)

diff  --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index f1eaf0ffbce09..8a7cdf36accb7 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -6264,13 +6264,13 @@ define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(ptr addrspace(1) %out,
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v7, v3
 ; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v10, 16, v2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v5, 16, v0
 ; GCN-HSA-NEXT:    v_ashr_i64 v[14:15], v[0:1], 48
 ; GCN-HSA-NEXT:    v_bfe_i32 v12, v1, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v4, v0, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v8, v2, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v6, v5, 0, 16
 ; GCN-HSA-NEXT:    v_ashr_i64 v[2:3], v[2:3], 48
-; GCN-HSA-NEXT:    v_bfe_i32 v6, v6, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v10, v10, 0, 16
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; GCN-HSA-NEXT:    v_bfe_i32 v0, v7, 0, 16
@@ -6825,16 +6825,16 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, v3
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v11, 16, v4
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v13, 16, v2
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v14, 16, v0
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v17, 16, v0
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v8, v0, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v12, v2, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[18:19], v[0:1], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v16, v1, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[2:3], v[2:3], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v0, v10, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[14:15], v[0:1], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v16, v10, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[18:19], v[2:3], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v12, v1, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v0, v2, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v10, v17, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v2, v13, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v4, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v10, v14, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v14, v13, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v11, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v9, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[25:26], v[6:7], 48
@@ -6847,22 +6847,22 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v4, v6, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v6, v1, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:80
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
@@ -6914,32 +6914,32 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, v3
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[8:11]
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v17, 16, v0
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v16, 16, v2
 ; GCN-HSA-NEXT:    v_bfe_i32 v8, v1, 0, 16
 ; GCN-HSA-NEXT:    v_ashr_i64 v[10:11], v[2:3], 48
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v16, 16, v2
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[14:15], v[8:11]
 ; GCN-HSA-NEXT:    v_bfe_i32 v0, v0, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v8, v2, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v2, v17, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-HSA-NEXT:    v_bfe_i32 v2, v3, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v10, v16, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(2)
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v16, v7
 ; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v17, 16, v6
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v28, 16, v4
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[8:11]
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[12:13], v[0:3]
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v9, 16, v4
 ; GCN-HSA-NEXT:    v_ashr_i64 v[14:15], v[4:5], 48
 ; GCN-HSA-NEXT:    v_bfe_i32 v12, v5, 0, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[8:11]
-; GCN-HSA-NEXT:    v_bfe_i32 v0, v6, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v8, v4, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v0, v6, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v10, v9, 0, 16
 ; GCN-HSA-NEXT:    v_ashr_i64 v[6:7], v[6:7], 48
-; GCN-HSA-NEXT:    v_bfe_i32 v10, v28, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v2, v17, 0, 16
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; GCN-HSA-NEXT:    v_bfe_i32 v4, v16, 0, 16
@@ -6973,50 +6973,50 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v0, v1, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v10, 16, v5
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v12, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v11, 16, v2
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v10, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v5, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v5, 16, v2
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v2, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v5
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v7
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v22, v1, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v4
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v13, v3
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v11, v3
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v20, 16, v6
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v22, v7
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v5, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v27, v6, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v13, 16, v6
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v10, v9, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v9, v7
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v6, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v4, v4, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v6, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v10, v9, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v14, v11, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v13, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v21, v3, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v23, v22, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v25, v7, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v29, v20, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v14, v5, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v16, v11, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v18, v3, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v9, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v26, v13, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:80
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v28, 31, v27
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v30, 31, v29
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[27:30], off, s[0:3], 0 offset:96
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[19:22], off, s[0:3], 0 offset:48
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:112
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0
@@ -7957,70 +7957,71 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v20, v3
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v24, v7
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v25, v11
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v26, v15
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v19, 16, v2
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v16, 16, v4
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v17, 16, v10
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v18, 16, v8
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v27, 16, v14
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v20, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[22:23], v[2:3], 48
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:240
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, v3
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v21, v7
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v22, v11
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v23, v15
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v16, 16, v2
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v17, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[2:3], 48
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:240
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[22:23], v[0:1], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v1, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:208
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v3, 16, v12
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[0:1], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v1, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v24, 16, v4
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v24, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[22:23], v[6:7], 48
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:176
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v21, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[6:7], 48
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:176
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[22:23], v[4:5], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v5, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:144
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[4:5], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v5, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:144
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v21, 16, v10
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[22:23], v[10:11], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v25, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:112
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v22, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[10:11], 48
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:112
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[22:23], v[8:9], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v9, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[8:9], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v9, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v8
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[22:23], v[14:15], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v26, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v23, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[14:15], 48
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[22:23], v[12:13], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v13, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[12:13], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v13, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v7, 16, v14
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v16, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v15, v2, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:224
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v12
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v3, v1, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v1, v12, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v5, v14, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v3, v3, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v13, v19, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v11, v2, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:224
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v7, v7, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v11, v9, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v9, v8, 0, 16
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v13, v10, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v7, v27, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v11, v18, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v15, v17, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v19, v16, 0, 16
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v15, v21, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v19, v24, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v4, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v6
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v21, v6, 0, 16
@@ -8075,154 +8076,154 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    flat_load_dwordx4 v[12:15], v[12:13]
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 48
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xf0
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xd0
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xf0
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xb0
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xd0
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s0
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(3)
 ; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[4:5], 48
 ; GCN-HSA-NEXT:    v_bfe_i32 v16, v5, 0, 16
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[16:19]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x90
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[16:19]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xb0
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    s_add_u32 s4, s0, 0x70
-; GCN-HSA-NEXT:    s_addc_u32 s5, s1, 0
-; GCN-HSA-NEXT:    s_add_u32 s6, s0, 0x50
-; GCN-HSA-NEXT:    s_addc_u32 s7, s1, 0
+; GCN-HSA-NEXT:    s_add_u32 s4, s0, 0x90
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, v7
-; GCN-HSA-NEXT:    s_add_u32 s8, s0, 32
+; GCN-HSA-NEXT:    s_addc_u32 s5, s1, 0
 ; GCN-HSA-NEXT:    v_bfe_i32 v16, v5, 0, 16
 ; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[6:7], 48
-; GCN-HSA-NEXT:    s_addc_u32 s9, s1, 0
+; GCN-HSA-NEXT:    s_add_u32 s6, s0, 0x70
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
 ; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v5, 16, v6
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[16:19]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s9
+; GCN-HSA-NEXT:    s_addc_u32 s7, s1, 0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[16:19]
+; GCN-HSA-NEXT:    s_add_u32 s8, s0, 0x50
 ; GCN-HSA-NEXT:    v_bfe_i32 v18, v5, 0, 16
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v5, 16, v4
 ; GCN-HSA-NEXT:    v_bfe_i32 v16, v6, 0, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s8
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[16:19]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s3
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(5)
-; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[0:1], 48
-; GCN-HSA-NEXT:    v_bfe_i32 v16, v1, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, v3
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[16:19]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s2
-; GCN-HSA-NEXT:    v_bfe_i32 v16, v1, 0, 16
-; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[2:3], 48
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[16:19]
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v1, 16, v4
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(6)
-; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[8:9], 48
-; GCN-HSA-NEXT:    v_bfe_i32 v16, v9, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT:    v_bfe_i32 v3, v4, 0, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, v11
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[5:6], v[16:19]
 ; GCN-HSA-NEXT:    v_bfe_i32 v4, v4, 0, 16
-; GCN-HSA-NEXT:    v_ashr_i64 v[6:7], v[10:11], 48
+; GCN-HSA-NEXT:    v_bfe_i32 v6, v5, 0, 16
+; GCN-HSA-NEXT:    s_addc_u32 s9, s1, 0
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s7
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[4:7]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s6
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(7)
-; GCN-HSA-NEXT:    v_ashr_i64 v[6:7], v[12:13], 48
-; GCN-HSA-NEXT:    v_bfe_i32 v4, v13, 0, 16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
+; GCN-HSA-NEXT:    s_add_u32 s10, s0, 32
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[4:7]
+; GCN-HSA-NEXT:    s_addc_u32 s11, s1, 0
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(5)
+; GCN-HSA-NEXT:    v_ashr_i64 v[6:7], v[0:1], 48
+; GCN-HSA-NEXT:    v_bfe_i32 v4, v1, 0, 16
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, v3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s11
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[4:7]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s5
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, v15
-; GCN-HSA-NEXT:    v_bfe_i32 v4, v4, 0, 16
-; GCN-HSA-NEXT:    v_ashr_i64 v[6:7], v[14:15], 48
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s10
+; GCN-HSA-NEXT:    v_bfe_i32 v4, v1, 0, 16
+; GCN-HSA-NEXT:    v_ashr_i64 v[6:7], v[2:3], 48
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v17, 16, v14
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[4:7]
-; GCN-HSA-NEXT:    v_bfe_i32 v15, v14, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v5, v1, 0, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s1
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v26, 16, v2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v25, 16, v8
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[16:19]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[4:7]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s5
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(7)
+; GCN-HSA-NEXT:    v_ashr_i64 v[5:6], v[8:9], 48
+; GCN-HSA-NEXT:    v_bfe_i32 v3, v9, 0, 16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s4
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[3:6]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, v11
+; GCN-HSA-NEXT:    v_bfe_i32 v3, v3, 0, 16
+; GCN-HSA-NEXT:    v_ashr_i64 v[5:6], v[10:11], 48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s2
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s9
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[3:6]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s8
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(8)
+; GCN-HSA-NEXT:    v_ashr_i64 v[5:6], v[12:13], 48
+; GCN-HSA-NEXT:    v_bfe_i32 v3, v13, 0, 16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[3:6]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s7
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, v15
+; GCN-HSA-NEXT:    v_bfe_i32 v3, v3, 0, 16
+; GCN-HSA-NEXT:    v_ashr_i64 v[5:6], v[14:15], 48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s6
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v25, 16, v2
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
-; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s0
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xe0
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v22, 16, v0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[13:14], v[3:6]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[3:6]
 ; GCN-HSA-NEXT:    v_bfe_i32 v19, v0, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v23, v2, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v5, v25, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v25, v26, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v21, v1, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v25, v25, 0, 16
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xc0
-; GCN-HSA-NEXT:    v_bfe_i32 v21, v22, 0, 16
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[23:26]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v9, 16, v10
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v18, 16, v10
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xa0
-; GCN-HSA-NEXT:    v_bfe_i32 v7, v10, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v9, v9, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v15, v10, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v17, v18, 0, 16
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[19:22]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_bfe_i32 v3, v8, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v9, 16, v8
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v7, 16, v14
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x80
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[7:10]
+; GCN-HSA-NEXT:    v_bfe_i32 v13, v7, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v7, v8, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v9, v9, 0, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[15:18]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x60
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v18, 16, v12
-; GCN-HSA-NEXT:    v_bfe_i32 v17, v17, 0, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[3:6]
+; GCN-HSA-NEXT:    v_bfe_i32 v11, v14, 0, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[7:10]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-HSA-NEXT:    v_bfe_i32 v13, v18, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v5, 16, v12
+; GCN-HSA-NEXT:    v_bfe_i32 v3, v12, 0, 16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s0, s0, 64
-; GCN-HSA-NEXT:    v_bfe_i32 v11, v12, 0, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[15:18]
+; GCN-HSA-NEXT:    v_bfe_i32 v5, v5, 0, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[11:14]
 ; GCN-HSA-NEXT:    s_addc_u32 s1, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[11:14]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[3:6]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
 ; GCN-NOHSA-VI-LABEL: global_sextload_v32i16_to_v32i64:
@@ -8242,70 +8243,70 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[26:27], 48, v[0:1]
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:208
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v22, v3
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[22:23], 48, v[0:1]
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:208
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v19, v3
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[26:27], 48, v[4:5]
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v5, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:144
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v20, v7
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[22:23], 48, v[4:5]
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v5, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:144
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v19, 0, 16
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[26:27], 48, v[8:9]
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v9, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:80
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v19, v11
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[22:23], 48, v[8:9]
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v9, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v18, v7
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[26:27], 48, v[12:13]
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v13, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:16
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v17, 16, v2
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v22, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[26:27], 48, v[2:3]
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:240
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v18, v15
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v20, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[26:27], 48, v[6:7]
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:176
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v17, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[26:27], 48, v[10:11]
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v19, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v23, 16, v8
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v28, 16, v14
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v12
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v1, v12, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[26:27], 48, v[14:15]
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v18, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v5, v14, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[22:23], 48, v[12:13]
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v13, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v18, v18, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[21:22], 48, v[2:3]
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[19:22], off, s[0:3], 0 offset:240
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v17, v11
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[20:21], 48, v[6:7]
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:176
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v17, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[19:20], 48, v[10:11]
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v16, v15
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:112
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v16, v16, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i64 v[18:19], 48, v[14:15]
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v2, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v6
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:224
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v16, 16, v4
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v23, 0, 16
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v23, v2, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v1, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v22, 16, v4
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v21, 16, v10
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:48
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v3, v3, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v9, 16, v8
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v7, 16, v14
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v12
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v25, v0, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v27, v2, 0, 16
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:224
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v3, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v1, v12, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v5, v14, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v7, v7, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v9, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v9, v8, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v10, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v7, v28, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v21, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v16, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v22, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v4, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v21, v6, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v26, 31, v25

diff  --git a/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll b/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll
index e0d6875e5adde..6c858efaf37d2 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll
@@ -8,11 +8,11 @@
 define <3 x float> @liveout_undef_subrange(<3 x float> %arg) {
 ; CHECK-LABEL: liveout_undef_subrange:
 ; CHECK:       ; %bb.0: ; %bb
-; CHECK-NEXT:    ; kill: killed $vgpr1
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_f32_e32 v1, v2, v2
-; CHECK-NEXT:    s_mov_b64 s[4:5], 0
+; CHECK-NEXT:    v_add_f32_e32 v3, v2, v2
 ; CHECK-NEXT:    v_add_f32_e32 v0, v0, v0
+; CHECK-NEXT:    s_mov_b64 s[4:5], 0
+; CHECK-NEXT:    ; kill: killed $vgpr1
 ; CHECK-NEXT:  .LBB0_1: ; %bb1
 ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    v_cmp_neq_f32_e32 vcc, 0, v2
@@ -22,7 +22,7 @@ define <3 x float> @liveout_undef_subrange(<3 x float> %arg) {
 ; CHECK-NEXT:  ; %bb.2: ; %bb2
 ; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
-; CHECK-NEXT:    v_mul_f32_e32 v2, v1, v2
+; CHECK-NEXT:    v_mul_f32_e32 v2, v3, v2
 ; CHECK-NEXT:    s_mov_b64 s[4:5], 0
 ; CHECK-NEXT:    s_cbranch_execnz .LBB0_1
 ; CHECK-NEXT:  ; %bb.3: ; %DummyReturnBlock

diff  --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
index 77955cea49a2d..26a9043a1b779 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -159,24 +159,24 @@ define i128 @mad_i64_i32_sextops_i32_i128(i32 %arg0, i32 %arg1, i128 %arg2) #0 {
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CI-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v0, v1, 0
-; CI-NEXT:    v_ashrrev_i32_e32 v12, 31, v0
+; CI-NEXT:    v_ashrrev_i32_e32 v13, 31, v0
 ; CI-NEXT:    v_mov_b32_e32 v8, 0
-; CI-NEXT:    v_mad_u64_u32 v[9:10], s[4:5], v12, v1, v[7:8]
-; CI-NEXT:    v_ashrrev_i32_e32 v13, 31, v1
-; CI-NEXT:    v_mov_b32_e32 v11, v10
+; CI-NEXT:    v_mad_u64_u32 v[9:10], s[4:5], v13, v1, v[7:8]
+; CI-NEXT:    v_ashrrev_i32_e32 v14, 31, v1
+; CI-NEXT:    v_mad_i64_i32 v[11:12], s[4:5], v1, v13, 0
+; CI-NEXT:    v_mov_b32_e32 v7, v10
 ; CI-NEXT:    v_mov_b32_e32 v10, v8
-; CI-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], v0, v13, v[9:10]
-; CI-NEXT:    v_add_i32_e32 v8, vcc, v11, v8
-; CI-NEXT:    v_mad_i64_i32 v[10:11], s[4:5], v1, v12, 0
-; CI-NEXT:    v_addc_u32_e64 v9, s[4:5], 0, 0, vcc
-; CI-NEXT:    v_mad_u64_u32 v[8:9], s[4:5], v12, v13, v[8:9]
-; CI-NEXT:    v_mad_i64_i32 v[0:1], s[4:5], v13, v0, v[10:11]
-; CI-NEXT:    v_add_i32_e32 v8, vcc, v8, v0
-; CI-NEXT:    v_addc_u32_e32 v9, vcc, v9, v1, vcc
-; CI-NEXT:    v_mov_b32_e32 v1, v7
+; CI-NEXT:    v_mad_u64_u32 v[8:9], s[4:5], v0, v14, v[9:10]
+; CI-NEXT:    v_mad_i64_i32 v[0:1], s[4:5], v14, v0, v[11:12]
+; CI-NEXT:    v_add_i32_e32 v9, vcc, v7, v9
+; CI-NEXT:    v_addc_u32_e64 v10, s[4:5], 0, 0, vcc
+; CI-NEXT:    v_mad_u64_u32 v[9:10], s[4:5], v13, v14, v[9:10]
+; CI-NEXT:    v_add_i32_e32 v7, vcc, v9, v0
+; CI-NEXT:    v_addc_u32_e32 v9, vcc, v10, v1, vcc
+; CI-NEXT:    v_mov_b32_e32 v1, v8
 ; CI-NEXT:    v_add_i32_e32 v0, vcc, v6, v2
 ; CI-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; CI-NEXT:    v_addc_u32_e32 v2, vcc, v8, v4, vcc
+; CI-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; CI-NEXT:    v_addc_u32_e32 v3, vcc, v9, v5, vcc
 ; CI-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -250,24 +250,26 @@ define i128 @mad_i64_i32_sextops_i32_i128(i32 %arg0, i32 %arg1, i128 %arg2) #0 {
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v15, 31, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_mad_u64_u32 v[9:10], null, v14, v1, v[7:8]
-; GFX11-NEXT:    v_dual_mov_b32 v11, v10 :: v_dual_mov_b32 v10, v8
+; GFX11-NEXT:    v_dual_mov_b32 v7, v10 :: v_dual_mov_b32 v10, v8
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_mad_u64_u32 v[11:12], null, v0, v15, v[9:10]
+; GFX11-NEXT:    v_mad_i64_i32 v[9:10], null, v1, v14, 0
+; GFX11-NEXT:    v_mov_b32_e32 v8, v12
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_mad_i64_i32 v[12:13], null, v15, v0, v[9:10]
+; GFX11-NEXT:    v_add_co_u32 v7, s0, v7, v8
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_mad_u64_u32 v[7:8], null, v0, v15, v[9:10]
-; GFX11-NEXT:    v_mov_b32_e32 v10, v8
-; GFX11-NEXT:    v_mad_i64_i32 v[8:9], null, v1, v14, 0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v10, s0, v11, v10
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v11, null, 0, 0, s0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_mad_i64_i32 v[12:13], null, v15, v0, v[8:9]
-; GFX11-NEXT:    v_mad_u64_u32 v[0:1], null, v14, v15, v[10:11]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v8, null, 0, 0, s0
+; GFX11-NEXT:    v_mad_u64_u32 v[0:1], null, v14, v15, v[7:8]
+; GFX11-NEXT:    v_mov_b32_e32 v7, v11
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_add_co_u32 v8, vcc_lo, v0, v12
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, v1, v13, vcc_lo
 ; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v6, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v7, v3, vcc_lo
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v7, v3, vcc_lo
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, v8, v4, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v9, v5, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %sext0 = sext i32 %arg0 to i128

diff  --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index 6d0e2caccf821..f214e9aecea5a 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -2185,22 +2185,22 @@ define amdgpu_kernel void @v_mul_i128(ptr addrspace(1) %out, ptr addrspace(1) %a
 ; VI-NEXT:    v_mad_u64_u32 v[12:13], s[0:1], v4, v2, 0
 ; VI-NEXT:    v_mul_lo_u32 v14, v5, v2
 ; VI-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], v0, v4, 0
-; VI-NEXT:    v_add_u32_e32 v13, vcc, v13, v10
+; VI-NEXT:    v_mul_lo_u32 v15, v7, v0
+; VI-NEXT:    v_add_u32_e32 v7, vcc, v13, v10
 ; VI-NEXT:    v_mov_b32_e32 v10, v3
 ; VI-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v1, v4, v[10:11]
-; VI-NEXT:    v_add_u32_e32 v13, vcc, v13, v14
-; VI-NEXT:    v_mov_b32_e32 v10, v4
+; VI-NEXT:    v_add_u32_e32 v13, vcc, v7, v14
+; VI-NEXT:    v_mov_b32_e32 v7, v4
 ; VI-NEXT:    v_mov_b32_e32 v4, v11
-; VI-NEXT:    v_mul_lo_u32 v7, v7, v0
 ; VI-NEXT:    v_mad_u64_u32 v[12:13], s[0:1], v6, v0, v[12:13]
 ; VI-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v0, v5, v[3:4]
-; VI-NEXT:    v_add_u32_e32 v13, vcc, v7, v13
+; VI-NEXT:    v_add_u32_e32 v11, vcc, v15, v13
 ; VI-NEXT:    v_mov_b32_e32 v0, v4
-; VI-NEXT:    v_mul_lo_u32 v11, v6, v1
-; VI-NEXT:    v_add_u32_e32 v6, vcc, v10, v0
+; VI-NEXT:    v_mul_lo_u32 v10, v6, v1
+; VI-NEXT:    v_add_u32_e32 v6, vcc, v7, v0
 ; VI-NEXT:    v_addc_u32_e64 v7, s[0:1], 0, 0, vcc
 ; VI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v1, v5, v[6:7]
-; VI-NEXT:    v_add_u32_e32 v5, vcc, v11, v13
+; VI-NEXT:    v_add_u32_e32 v5, vcc, v10, v11
 ; VI-NEXT:    v_add_u32_e32 v4, vcc, v0, v12
 ; VI-NEXT:    v_addc_u32_e32 v5, vcc, v1, v5, vcc
 ; VI-NEXT:    flat_store_dwordx4 v[8:9], v[2:5]
@@ -2220,18 +2220,18 @@ define amdgpu_kernel void @v_mul_i128(ptr addrspace(1) %out, ptr addrspace(1) %a
 ; GFX9-NEXT:    v_mul_lo_u32 v15, v4, v3
 ; GFX9-NEXT:    v_mad_u64_u32 v[11:12], s[0:1], v1, v4, v[9:10]
 ; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], v4, v2, 0
-; GFX9-NEXT:    v_mul_lo_u32 v16, v7, v0
-; GFX9-NEXT:    v_mov_b32_e32 v7, v12
+; GFX9-NEXT:    v_mov_b32_e32 v4, v12
 ; GFX9-NEXT:    v_mov_b32_e32 v12, v10
 ; GFX9-NEXT:    v_mad_u64_u32 v[9:10], s[0:1], v0, v5, v[11:12]
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v15, v14
+; GFX9-NEXT:    v_mul_lo_u32 v17, v7, v0
 ; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], v6, v0, v[2:3]
 ; GFX9-NEXT:    v_mov_b32_e32 v0, v10
-; GFX9-NEXT:    v_mul_lo_u32 v4, v6, v1
-; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v7, v0
+; GFX9-NEXT:    v_mul_lo_u32 v16, v6, v1
+; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v4, v0
 ; GFX9-NEXT:    v_addc_co_u32_e64 v7, s[0:1], 0, 0, vcc
 ; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v1, v5, v[6:7]
-; GFX9-NEXT:    v_add3_u32 v3, v16, v3, v4
+; GFX9-NEXT:    v_add3_u32 v3, v17, v3, v16
 ; GFX9-NEXT:    v_add_co_u32_e32 v10, vcc, v0, v2
 ; GFX9-NEXT:    v_addc_co_u32_e32 v11, vcc, v1, v3, vcc
 ; GFX9-NEXT:    global_store_dwordx4 v13, v[8:11], s[2:3]
@@ -2240,69 +2240,72 @@ define amdgpu_kernel void @v_mul_i128(ptr addrspace(1) %out, ptr addrspace(1) %a
 ; GFX10-LABEL: v_mul_i128:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
-; GFX10-NEXT:    v_lshlrev_b32_e32 v13, 4, v0
+; GFX10-NEXT:    v_lshlrev_b32_e32 v14, 4, v0
 ; GFX10-NEXT:    v_mov_b32_e32 v10, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    global_load_dwordx4 v[0:3], v13, s[0:1]
-; GFX10-NEXT:    global_load_dwordx4 v[4:7], v13, s[2:3]
+; GFX10-NEXT:    global_load_dwordx4 v[0:3], v14, s[0:1]
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v14, s[2:3]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    v_mad_u64_u32 v[8:9], s0, v0, v4, 0
-; GFX10-NEXT:    v_mul_lo_u32 v15, v5, v2
 ; GFX10-NEXT:    v_mul_lo_u32 v7, v7, v0
 ; GFX10-NEXT:    v_mad_u64_u32 v[11:12], s0, v1, v4, v[9:10]
-; GFX10-NEXT:    v_mov_b32_e32 v14, v12
+; GFX10-NEXT:    v_mov_b32_e32 v9, v12
 ; GFX10-NEXT:    v_mov_b32_e32 v12, v10
-; GFX10-NEXT:    v_mad_u64_u32 v[9:10], s0, v0, v5, v[11:12]
+; GFX10-NEXT:    v_mul_lo_u32 v10, v5, v2
+; GFX10-NEXT:    v_mad_u64_u32 v[12:13], s0, v0, v5, v[11:12]
 ; GFX10-NEXT:    v_mul_lo_u32 v11, v4, v3
 ; GFX10-NEXT:    v_mad_u64_u32 v[2:3], s0, v4, v2, 0
-; GFX10-NEXT:    v_mul_lo_u32 v12, v6, v1
-; GFX10-NEXT:    v_mov_b32_e32 v4, v10
-; GFX10-NEXT:    v_add3_u32 v3, v3, v11, v15
-; GFX10-NEXT:    v_add_co_u32 v10, s0, v14, v4
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v11, s0, 0, 0, s0
+; GFX10-NEXT:    v_mov_b32_e32 v4, v13
+; GFX10-NEXT:    v_mul_lo_u32 v13, v6, v1
+; GFX10-NEXT:    v_add3_u32 v3, v3, v11, v10
+; GFX10-NEXT:    v_add_co_u32 v9, s0, v9, v4
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v10, s0, 0, 0, s0
 ; GFX10-NEXT:    v_mad_u64_u32 v[2:3], s0, v6, v0, v[2:3]
-; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s0, v1, v5, v[10:11]
-; GFX10-NEXT:    v_add3_u32 v3, v7, v3, v12
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s0, v1, v5, v[9:10]
+; GFX10-NEXT:    v_mov_b32_e32 v9, v12
+; GFX10-NEXT:    v_add3_u32 v3, v7, v3, v13
 ; GFX10-NEXT:    v_add_co_u32 v10, vcc_lo, v0, v2
 ; GFX10-NEXT:    v_add_co_ci_u32_e32 v11, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    global_store_dwordx4 v13, v[8:11], s[2:3]
+; GFX10-NEXT:    global_store_dwordx4 v14, v[8:11], s[2:3]
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: v_mul_i128:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_load_b128 s[0:3], s[0:1], 0x2c
-; GFX11-NEXT:    v_dual_mov_b32 v10, 0 :: v_dual_lshlrev_b32 v15, 4, v0
+; GFX11-NEXT:    v_lshlrev_b32_e32 v16, 4, v0
+; GFX11-NEXT:    v_mov_b32_e32 v10, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    global_load_b128 v[0:3], v15, s[0:1]
-; GFX11-NEXT:    global_load_b128 v[4:7], v15, s[2:3]
+; GFX11-NEXT:    global_load_b128 v[0:3], v16, s[0:1]
+; GFX11-NEXT:    global_load_b128 v[4:7], v16, s[2:3]
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    v_mad_u64_u32 v[8:9], null, v0, v4, 0
-; GFX11-NEXT:    v_mul_lo_u32 v14, v5, v2
+; GFX11-NEXT:    v_mul_lo_u32 v15, v5, v2
 ; GFX11-NEXT:    v_mul_lo_u32 v3, v4, v3
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_mad_u64_u32 v[11:12], null, v1, v4, v[9:10]
-; GFX11-NEXT:    v_dual_mov_b32 v13, v12 :: v_dual_mov_b32 v12, v10
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_mad_u64_u32 v[9:10], null, v0, v5, v[11:12]
-; GFX11-NEXT:    v_mad_u64_u32 v[11:12], null, v4, v2, 0
+; GFX11-NEXT:    v_dual_mov_b32 v9, v12 :: v_dual_mov_b32 v12, v10
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_mad_u64_u32 v[13:14], null, v0, v5, v[11:12]
+; GFX11-NEXT:    v_mad_u64_u32 v[10:11], null, v4, v2, 0
 ; GFX11-NEXT:    v_mul_lo_u32 v4, v6, v1
-; GFX11-NEXT:    v_mov_b32_e32 v2, v10
-; GFX11-NEXT:    v_mul_lo_u32 v10, v7, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_add3_u32 v12, v12, v3, v14
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v13, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_mul_lo_u32 v12, v7, v0
+; GFX11-NEXT:    v_mov_b32_e32 v2, v14
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_add3_u32 v11, v11, v3, v15
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v9, v2
+; GFX11-NEXT:    v_mov_b32_e32 v9, v13
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, 0, 0, s0
-; GFX11-NEXT:    v_mad_u64_u32 v[13:14], null, v6, v0, v[11:12]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_mad_u64_u32 v[14:15], null, v6, v0, v[10:11]
 ; GFX11-NEXT:    v_mad_u64_u32 v[6:7], null, v1, v5, v[2:3]
-; GFX11-NEXT:    v_add3_u32 v0, v10, v14, v4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_u32 v10, vcc_lo, v6, v13
+; GFX11-NEXT:    v_add3_u32 v0, v12, v15, v4
+; GFX11-NEXT:    v_add_co_u32 v10, vcc_lo, v6, v14
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v11, vcc_lo, v7, v0, vcc_lo
-; GFX11-NEXT:    global_store_b128 v15, v[8:11], s[2:3]
+; GFX11-NEXT:    global_store_b128 v16, v[8:11], s[2:3]
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX11-NEXT:    s_endpgm
 ;

diff  --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index 54dc55c7fd500..be742480a59bd 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -437,96 +437,96 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX8-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v2, 0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 17, v0
-; GFX8-NEXT:    v_lshlrev_b64 v[0:1], 3, v[1:2]
-; GFX8-NEXT:    v_and_b32_e32 v6, 0xfe000000, v3
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v6
-; GFX8-NEXT:    v_mov_b32_e32 v2, s35
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s34, v0
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 17, v0
+; GFX8-NEXT:    v_lshlrev_b64 v[1:2], 3, v[1:2]
+; GFX8-NEXT:    v_and_b32_e32 v0, 0xfe000000, v0
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v3, s35
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s34, v1
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v3, vcc
 ; GFX8-NEXT:    s_movk_i32 s0, 0x5000
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; GFX8-NEXT:    v_mov_b32_e32 v2, 0
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s0, v1
 ; GFX8-NEXT:    v_mov_b32_e32 v3, 0
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX8-NEXT:    s_movk_i32 s0, 0x7f
 ; GFX8-NEXT:  .LBB1_1: ; %for.cond.preheader
 ; GFX8-NEXT:    ; =>This Loop Header: Depth=1
 ; GFX8-NEXT:    ; Child Loop BB1_2 Depth 2
+; GFX8-NEXT:    v_mov_b32_e32 v6, v2
 ; GFX8-NEXT:    v_mov_b32_e32 v5, v1
-; GFX8-NEXT:    v_mov_b32_e32 v4, v0
 ; GFX8-NEXT:    s_mov_b32 s1, 0
 ; GFX8-NEXT:  .LBB1_2: ; %for.body
 ; GFX8-NEXT:    ; Parent Loop BB1_1 Depth=1
 ; GFX8-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, 0xffffb000, v4
-; GFX8-NEXT:    v_addc_u32_e32 v8, vcc, -1, v5, vcc
-; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 0xffffb800, v4
-; GFX8-NEXT:    v_addc_u32_e32 v10, vcc, -1, v5, vcc
-; GFX8-NEXT:    v_add_u32_e32 v11, vcc, 0xffffc000, v4
+; GFX8-NEXT:    v_add_u32_e32 v7, vcc, 0xffffb000, v5
+; GFX8-NEXT:    v_addc_u32_e32 v8, vcc, -1, v6, vcc
+; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 0xffffb800, v5
+; GFX8-NEXT:    v_addc_u32_e32 v10, vcc, -1, v6, vcc
+; GFX8-NEXT:    v_add_u32_e32 v11, vcc, 0xffffc000, v5
 ; GFX8-NEXT:    flat_load_dwordx2 v[7:8], v[7:8]
 ; GFX8-NEXT:    flat_load_dwordx2 v[9:10], v[9:10]
-; GFX8-NEXT:    v_addc_u32_e32 v12, vcc, -1, v5, vcc
-; GFX8-NEXT:    v_add_u32_e32 v13, vcc, 0xffffc800, v4
-; GFX8-NEXT:    v_addc_u32_e32 v14, vcc, -1, v5, vcc
-; GFX8-NEXT:    v_add_u32_e32 v15, vcc, 0xffffd000, v4
+; GFX8-NEXT:    v_addc_u32_e32 v12, vcc, -1, v6, vcc
+; GFX8-NEXT:    v_add_u32_e32 v13, vcc, 0xffffc800, v5
+; GFX8-NEXT:    v_addc_u32_e32 v14, vcc, -1, v6, vcc
+; GFX8-NEXT:    v_add_u32_e32 v15, vcc, 0xffffd000, v5
 ; GFX8-NEXT:    flat_load_dwordx2 v[11:12], v[11:12]
 ; GFX8-NEXT:    flat_load_dwordx2 v[13:14], v[13:14]
-; GFX8-NEXT:    v_addc_u32_e32 v16, vcc, -1, v5, vcc
-; GFX8-NEXT:    v_add_u32_e32 v17, vcc, 0xffffd800, v4
-; GFX8-NEXT:    v_addc_u32_e32 v18, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_addc_u32_e32 v16, vcc, -1, v6, vcc
+; GFX8-NEXT:    v_add_u32_e32 v17, vcc, 0xffffd800, v5
+; GFX8-NEXT:    v_addc_u32_e32 v18, vcc, -1, v6, vcc
 ; GFX8-NEXT:    flat_load_dwordx2 v[15:16], v[15:16]
 ; GFX8-NEXT:    flat_load_dwordx2 v[17:18], v[17:18]
-; GFX8-NEXT:    v_add_u32_e32 v19, vcc, 0xffffe000, v4
-; GFX8-NEXT:    v_addc_u32_e32 v20, vcc, -1, v5, vcc
-; GFX8-NEXT:    v_add_u32_e32 v21, vcc, 0xffffe800, v4
+; GFX8-NEXT:    v_add_u32_e32 v19, vcc, 0xffffe000, v5
+; GFX8-NEXT:    v_addc_u32_e32 v20, vcc, -1, v6, vcc
+; GFX8-NEXT:    v_add_u32_e32 v21, vcc, 0xffffe800, v5
 ; GFX8-NEXT:    flat_load_dwordx2 v[19:20], v[19:20]
-; GFX8-NEXT:    v_addc_u32_e32 v22, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_addc_u32_e32 v22, vcc, -1, v6, vcc
 ; GFX8-NEXT:    flat_load_dwordx2 v[21:22], v[21:22]
-; GFX8-NEXT:    v_add_u32_e32 v23, vcc, 0xfffff000, v4
-; GFX8-NEXT:    v_addc_u32_e32 v24, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v23, vcc, 0xfffff000, v5
+; GFX8-NEXT:    v_addc_u32_e32 v24, vcc, -1, v6, vcc
 ; GFX8-NEXT:    flat_load_dwordx2 v[23:24], v[23:24]
-; GFX8-NEXT:    v_add_u32_e32 v25, vcc, 0xfffff800, v4
-; GFX8-NEXT:    v_addc_u32_e32 v26, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v25, vcc, 0xfffff800, v5
+; GFX8-NEXT:    v_addc_u32_e32 v26, vcc, -1, v6, vcc
 ; GFX8-NEXT:    flat_load_dwordx2 v[25:26], v[25:26]
-; GFX8-NEXT:    flat_load_dwordx2 v[27:28], v[4:5]
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x10000, v4
-; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GFX8-NEXT:    flat_load_dwordx2 v[27:28], v[5:6]
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x10000, v5
+; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
 ; GFX8-NEXT:    s_addk_i32 s1, 0x2000
 ; GFX8-NEXT:    s_cmp_gt_u32 s1, 0x3fffff
 ; GFX8-NEXT:    s_waitcnt vmcnt(10)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v7, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v8, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v7, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v8, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(9)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v9, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v10, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v9, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v10, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(8)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v11, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v12, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v11, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v12, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(7)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v13, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v14, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v13, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v14, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(6)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v15, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v16, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v15, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v16, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(5)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v17, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v18, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v17, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v18, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(4)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v19, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v20, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v19, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v20, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(3)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v21, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v22, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v21, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v22, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(2)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v23, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v24, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v23, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v24, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v25, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v26, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v25, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v26, v4, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v27, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v28, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v27, v3
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v28, v4, vcc
 ; GFX8-NEXT:    s_cbranch_scc0 .LBB1_2
 ; GFX8-NEXT:  ; %bb.3: ; %while.cond.loopexit
 ; GFX8-NEXT:    ; in Loop: Header=BB1_1 Depth=1
@@ -538,9 +538,9 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX8-NEXT:    s_branch .LBB1_1
 ; GFX8-NEXT:  .LBB1_5: ; %while.end
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s35
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s34, v6
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s34, v0
 ; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[3:4]
 ; GFX8-NEXT:    s_endpgm
 ;
 ; GFX900-LABEL: clmem_read:
@@ -565,86 +565,86 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX900-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; GFX900-NEXT:    v_and_b32_e32 v1, 0xff, v0
 ; GFX900-NEXT:    v_mov_b32_e32 v2, 0
-; GFX900-NEXT:    v_lshlrev_b32_e32 v3, 17, v0
-; GFX900-NEXT:    v_lshlrev_b64 v[0:1], 3, v[1:2]
-; GFX900-NEXT:    v_and_b32_e32 v6, 0xfe000000, v3
-; GFX900-NEXT:    v_or_b32_e32 v0, v0, v6
-; GFX900-NEXT:    v_mov_b32_e32 v2, s35
-; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s34, v0
-; GFX900-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 17, v0
+; GFX900-NEXT:    v_lshlrev_b64 v[1:2], 3, v[1:2]
+; GFX900-NEXT:    v_and_b32_e32 v0, 0xfe000000, v0
+; GFX900-NEXT:    v_or_b32_e32 v1, v1, v0
+; GFX900-NEXT:    v_mov_b32_e32 v3, s35
+; GFX900-NEXT:    v_add_co_u32_e32 v1, vcc, s34, v1
+; GFX900-NEXT:    v_addc_co_u32_e32 v2, vcc, v2, v3, vcc
 ; GFX900-NEXT:    s_movk_i32 s0, 0x5000
-; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX900-NEXT:    v_mov_b32_e32 v2, 0
-; GFX900-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX900-NEXT:    s_movk_i32 s2, 0x7f
+; GFX900-NEXT:    v_add_co_u32_e32 v1, vcc, s0, v1
 ; GFX900-NEXT:    v_mov_b32_e32 v3, 0
+; GFX900-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GFX900-NEXT:    s_movk_i32 s2, 0x7f
+; GFX900-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX900-NEXT:    s_movk_i32 s0, 0xd000
 ; GFX900-NEXT:    s_movk_i32 s1, 0xe000
 ; GFX900-NEXT:    s_movk_i32 s3, 0xf000
 ; GFX900-NEXT:  .LBB1_1: ; %for.cond.preheader
 ; GFX900-NEXT:    ; =>This Loop Header: Depth=1
 ; GFX900-NEXT:    ; Child Loop BB1_2 Depth 2
+; GFX900-NEXT:    v_mov_b32_e32 v6, v2
 ; GFX900-NEXT:    v_mov_b32_e32 v5, v1
-; GFX900-NEXT:    v_mov_b32_e32 v4, v0
 ; GFX900-NEXT:    s_mov_b32 s4, 0
 ; GFX900-NEXT:  .LBB1_2: ; %for.body
 ; GFX900-NEXT:    ; Parent Loop BB1_1 Depth=1
 ; GFX900-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX900-NEXT:    v_add_co_u32_e32 v7, vcc, 0xffffb000, v4
-; GFX900-NEXT:    v_addc_co_u32_e32 v8, vcc, -1, v5, vcc
-; GFX900-NEXT:    global_load_dwordx2 v[9:10], v[4:5], off offset:-4096
-; GFX900-NEXT:    global_load_dwordx2 v[11:12], v[4:5], off offset:-2048
-; GFX900-NEXT:    v_add_co_u32_e32 v13, vcc, 0xffffc000, v4
+; GFX900-NEXT:    v_add_co_u32_e32 v7, vcc, 0xffffb000, v5
+; GFX900-NEXT:    v_addc_co_u32_e32 v8, vcc, -1, v6, vcc
+; GFX900-NEXT:    global_load_dwordx2 v[9:10], v[5:6], off offset:-4096
+; GFX900-NEXT:    global_load_dwordx2 v[11:12], v[5:6], off offset:-2048
+; GFX900-NEXT:    v_add_co_u32_e32 v13, vcc, 0xffffc000, v5
 ; GFX900-NEXT:    global_load_dwordx2 v[7:8], v[7:8], off
-; GFX900-NEXT:    v_addc_co_u32_e32 v14, vcc, -1, v5, vcc
+; GFX900-NEXT:    v_addc_co_u32_e32 v14, vcc, -1, v6, vcc
 ; GFX900-NEXT:    global_load_dwordx2 v[17:18], v[13:14], off offset:-2048
-; GFX900-NEXT:    v_add_co_u32_e32 v15, vcc, s0, v4
-; GFX900-NEXT:    v_addc_co_u32_e32 v16, vcc, -1, v5, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v15, vcc, s0, v5
+; GFX900-NEXT:    v_addc_co_u32_e32 v16, vcc, -1, v6, vcc
 ; GFX900-NEXT:    global_load_dwordx2 v[15:16], v[15:16], off offset:-2048
-; GFX900-NEXT:    v_add_co_u32_e32 v19, vcc, s1, v4
+; GFX900-NEXT:    v_add_co_u32_e32 v19, vcc, s1, v5
 ; GFX900-NEXT:    global_load_dwordx2 v[13:14], v[13:14], off
-; GFX900-NEXT:    v_addc_co_u32_e32 v20, vcc, -1, v5, vcc
+; GFX900-NEXT:    v_addc_co_u32_e32 v20, vcc, -1, v6, vcc
 ; GFX900-NEXT:    global_load_dwordx2 v[23:24], v[19:20], off offset:-4096
 ; GFX900-NEXT:    global_load_dwordx2 v[25:26], v[19:20], off offset:-2048
 ; GFX900-NEXT:    global_load_dwordx2 v[27:28], v[19:20], off
-; GFX900-NEXT:    v_add_co_u32_e32 v21, vcc, s3, v4
-; GFX900-NEXT:    v_addc_co_u32_e32 v22, vcc, -1, v5, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v21, vcc, s3, v5
+; GFX900-NEXT:    v_addc_co_u32_e32 v22, vcc, -1, v6, vcc
 ; GFX900-NEXT:    global_load_dwordx2 v[19:20], v[21:22], off offset:-2048
-; GFX900-NEXT:    global_load_dwordx2 v[29:30], v[4:5], off
-; GFX900-NEXT:    v_add_co_u32_e32 v4, vcc, 0x10000, v4
-; GFX900-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
+; GFX900-NEXT:    global_load_dwordx2 v[29:30], v[5:6], off
+; GFX900-NEXT:    v_add_co_u32_e32 v5, vcc, 0x10000, v5
+; GFX900-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
 ; GFX900-NEXT:    s_addk_i32 s4, 0x2000
 ; GFX900-NEXT:    s_cmp_gt_u32 s4, 0x3fffff
 ; GFX900-NEXT:    s_waitcnt vmcnt(8)
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v7, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v8, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v7, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v8, v4, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(7)
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v17, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v18, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v17, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v18, v4, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(5)
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v13, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v14, v3, vcc
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v15, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v16, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v13, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v14, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v15, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v16, v4, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(4)
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v23, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v24, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v23, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v24, v4, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(3)
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v25, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v26, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v25, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v26, v4, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(2)
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v27, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v28, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v27, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v28, v4, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(1)
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v19, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v20, v3, vcc
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v9, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v10, v3, vcc
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v11, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v12, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v19, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v20, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v9, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v10, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v11, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v12, v4, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(0)
-; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v29, v2
-; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v30, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v29, v3
+; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v30, v4, vcc
 ; GFX900-NEXT:    s_cbranch_scc0 .LBB1_2
 ; GFX900-NEXT:  ; %bb.3: ; %while.cond.loopexit
 ; GFX900-NEXT:    ; in Loop: Header=BB1_1 Depth=1
@@ -656,9 +656,9 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX900-NEXT:    s_branch .LBB1_1
 ; GFX900-NEXT:  .LBB1_5: ; %while.end
 ; GFX900-NEXT:    v_mov_b32_e32 v1, s35
-; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s34, v6
+; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s34, v0
 ; GFX900-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX900-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX900-NEXT:    global_store_dwordx2 v[0:1], v[3:4], off
 ; GFX900-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: clmem_read:
@@ -683,40 +683,40 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX10-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xff, v0
-; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 17, v0
-; GFX10-NEXT:    s_movk_i32 s1, 0x7f
-; GFX10-NEXT:    v_lshlrev_b64 v[0:1], 3, v[1:2]
-; GFX10-NEXT:    v_and_b32_e32 v6, 0xfe000000, v3
-; GFX10-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 17, v0
 ; GFX10-NEXT:    v_mov_b32_e32 v3, 0
-; GFX10-NEXT:    v_or_b32_e32 v0, v0, v6
-; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, s34
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, s35, v1, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, 0x5000, v0
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX10-NEXT:    v_mov_b32_e32 v4, 0
+; GFX10-NEXT:    s_movk_i32 s1, 0x7f
+; GFX10-NEXT:    v_lshlrev_b64 v[1:2], 3, v[1:2]
+; GFX10-NEXT:    v_and_b32_e32 v0, 0xfe000000, v0
+; GFX10-NEXT:    v_or_b32_e32 v1, v1, v0
+; GFX10-NEXT:    v_add_co_u32 v1, vcc_lo, v1, s34
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, s35, v2, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v1, vcc_lo, 0x5000, v1
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
 ; GFX10-NEXT:  .LBB1_1: ; %for.cond.preheader
 ; GFX10-NEXT:    ; =>This Loop Header: Depth=1
 ; GFX10-NEXT:    ; Child Loop BB1_2 Depth 2
+; GFX10-NEXT:    v_mov_b32_e32 v6, v2
 ; GFX10-NEXT:    v_mov_b32_e32 v5, v1
-; GFX10-NEXT:    v_mov_b32_e32 v4, v0
 ; GFX10-NEXT:    s_mov_b32 s2, 0
 ; GFX10-NEXT:  .LBB1_2: ; %for.body
 ; GFX10-NEXT:    ; Parent Loop BB1_1 Depth=1
 ; GFX10-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v4, 0xffffb800
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, -1, v5, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v9, vcc_lo, v4, 0xffffc800
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v10, vcc_lo, -1, v5, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v13, vcc_lo, v4, 0xffffd800
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v14, vcc_lo, -1, v5, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v17, vcc_lo, v4, 0xffffe800
+; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v5, 0xffffb800
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, -1, v6, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v9, vcc_lo, v5, 0xffffc800
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v10, vcc_lo, -1, v6, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v13, vcc_lo, v5, 0xffffd800
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v14, vcc_lo, -1, v6, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v17, vcc_lo, v5, 0xffffe800
 ; GFX10-NEXT:    s_clause 0x2
 ; GFX10-NEXT:    global_load_dwordx2 v[11:12], v[7:8], off offset:-2048
 ; GFX10-NEXT:    global_load_dwordx2 v[15:16], v[9:10], off offset:-2048
 ; GFX10-NEXT:    global_load_dwordx2 v[19:20], v[13:14], off offset:-2048
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v18, vcc_lo, -1, v5, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v21, vcc_lo, 0xfffff000, v4
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v22, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v18, vcc_lo, -1, v6, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v21, vcc_lo, 0xfffff000, v5
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v22, vcc_lo, -1, v6, vcc_lo
 ; GFX10-NEXT:    s_clause 0x7
 ; GFX10-NEXT:    global_load_dwordx2 v[23:24], v[17:18], off offset:-2048
 ; GFX10-NEXT:    global_load_dwordx2 v[7:8], v[7:8], off
@@ -724,42 +724,42 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX10-NEXT:    global_load_dwordx2 v[13:14], v[13:14], off
 ; GFX10-NEXT:    global_load_dwordx2 v[25:26], v[17:18], off
 ; GFX10-NEXT:    global_load_dwordx2 v[27:28], v[21:22], off
-; GFX10-NEXT:    global_load_dwordx2 v[29:30], v[4:5], off offset:-2048
-; GFX10-NEXT:    global_load_dwordx2 v[31:32], v[4:5], off
-; GFX10-NEXT:    v_add_co_u32 v4, vcc_lo, 0x10000, v4
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
+; GFX10-NEXT:    global_load_dwordx2 v[29:30], v[5:6], off offset:-2048
+; GFX10-NEXT:    global_load_dwordx2 v[31:32], v[5:6], off
+; GFX10-NEXT:    v_add_co_u32 v5, vcc_lo, 0x10000, v5
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
 ; GFX10-NEXT:    s_addk_i32 s2, 0x2000
 ; GFX10-NEXT:    s_cmp_gt_u32 s2, 0x3fffff
 ; GFX10-NEXT:    s_waitcnt vmcnt(10)
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v11, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v12, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v11, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v12, v4, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(6)
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v7, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v8, v3, s0
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v15, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v16, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v7, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v8, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v15, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v16, v4, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(5)
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v9, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v10, v3, s0
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v19, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v20, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v9, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v10, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v19, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v20, v4, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(4)
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v13, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v14, v3, s0
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v23, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v24, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v13, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v14, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v23, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v24, v4, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(3)
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v25, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v26, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v25, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v26, v4, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(2)
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v27, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v28, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v27, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v28, v4, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(1)
-; GFX10-NEXT:    v_add_co_u32 v2, s0, v29, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v30, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v3, s0, v29, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v30, v4, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v31, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v32, v3, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v3, vcc_lo, v31, v3
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v4, vcc_lo, v32, v4, vcc_lo
 ; GFX10-NEXT:    s_cbranch_scc0 .LBB1_2
 ; GFX10-NEXT:  ; %bb.3: ; %while.cond.loopexit
 ; GFX10-NEXT:    ; in Loop: Header=BB1_1 Depth=1
@@ -770,9 +770,9 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX10-NEXT:    s_mov_b32 s1, s0
 ; GFX10-NEXT:    s_branch .LBB1_1
 ; GFX10-NEXT:  .LBB1_5: ; %while.end
-; GFX10-NEXT:    v_add_co_u32 v0, s0, s34, v6
+; GFX10-NEXT:    v_add_co_u32 v0, s0, s34, v0
 ; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s0, s35, 0, s0
-; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[3:4], off
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX90A-LABEL: clmem_read:
@@ -903,102 +903,102 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_swappc_b64 s[30:31], s[2:3]
 ; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_and_b32 v1, 0xff, v0
-; GFX11-NEXT:    v_lshlrev_b32_e32 v3, 17, v0
+; GFX11-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_lshlrev_b32 v0, 17, v0
+; GFX11-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX11-NEXT:    s_movk_i32 s1, 0x7f
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_lshlrev_b64 v[0:1], 3, v[1:2]
-; GFX11-NEXT:    v_and_b32_e32 v6, 0xfe000000, v3
-; GFX11-NEXT:    v_mov_b32_e32 v2, 0
-; GFX11-NEXT:    v_mov_b32_e32 v3, 0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_or_b32_e32 v0, v0, v6
-; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, s34
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, s35, v1, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, 0x5000, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_lshlrev_b64 v[1:2], 3, v[1:2]
+; GFX11-NEXT:    v_and_b32_e32 v0, 0xfe000000, v0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_or_b32_e32 v1, v1, v0
+; GFX11-NEXT:    v_add_co_u32 v1, vcc_lo, v1, s34
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, s35, v2, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v1, vcc_lo, 0x5000, v1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
 ; GFX11-NEXT:  .LBB1_1: ; %for.cond.preheader
 ; GFX11-NEXT:    ; =>This Loop Header: Depth=1
 ; GFX11-NEXT:    ; Child Loop BB1_2 Depth 2
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX11-NEXT:    v_dual_mov_b32 v6, v2 :: v_dual_mov_b32 v5, v1
 ; GFX11-NEXT:    s_mov_b32 s2, 0
 ; GFX11-NEXT:  .LBB1_2: ; %for.body
 ; GFX11-NEXT:    ; Parent Loop BB1_1 Depth=1
 ; GFX11-NEXT:    ; => This Inner Loop Header: Depth=2
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_u32 v7, vcc_lo, v4, 0xffffc000
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, -1, v5, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v9, vcc_lo, 0xffffc000, v4
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v10, vcc_lo, -1, v5, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v11, vcc_lo, 0xffffd000, v4
+; GFX11-NEXT:    v_add_co_u32 v7, vcc_lo, v5, 0xffffc000
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v9, vcc_lo, 0xffffc000, v5
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v10, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v11, vcc_lo, 0xffffd000, v5
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_load_b64 v[13:14], v[7:8], off offset:-4096
 ; GFX11-NEXT:    global_load_b64 v[9:10], v[9:10], off offset:-2048
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v12, vcc_lo, -1, v5, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v15, vcc_lo, v4, 0xffffe000
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v16, vcc_lo, -1, v5, vcc_lo
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v12, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v15, vcc_lo, v5, 0xffffe000
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v16, vcc_lo, -1, v6, vcc_lo
 ; GFX11-NEXT:    global_load_b64 v[11:12], v[11:12], off offset:-2048
-; GFX11-NEXT:    v_add_co_u32 v17, vcc_lo, 0xffffe000, v4
+; GFX11-NEXT:    v_add_co_u32 v17, vcc_lo, 0xffffe000, v5
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_load_b64 v[19:20], v[15:16], off offset:-4096
 ; GFX11-NEXT:    global_load_b64 v[7:8], v[7:8], off
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v18, vcc_lo, -1, v5, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v21, vcc_lo, 0xfffff000, v4
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v22, vcc_lo, -1, v5, vcc_lo
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v18, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v21, vcc_lo, 0xfffff000, v5
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v22, vcc_lo, -1, v6, vcc_lo
 ; GFX11-NEXT:    s_clause 0x5
 ; GFX11-NEXT:    global_load_b64 v[17:18], v[17:18], off offset:-2048
 ; GFX11-NEXT:    global_load_b64 v[15:16], v[15:16], off
 ; GFX11-NEXT:    global_load_b64 v[21:22], v[21:22], off offset:-2048
-; GFX11-NEXT:    global_load_b64 v[23:24], v[4:5], off offset:-4096
-; GFX11-NEXT:    global_load_b64 v[25:26], v[4:5], off offset:-2048
-; GFX11-NEXT:    global_load_b64 v[27:28], v[4:5], off
-; GFX11-NEXT:    v_add_co_u32 v4, vcc_lo, 0x10000, v4
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
+; GFX11-NEXT:    global_load_b64 v[23:24], v[5:6], off offset:-4096
+; GFX11-NEXT:    global_load_b64 v[25:26], v[5:6], off offset:-2048
+; GFX11-NEXT:    global_load_b64 v[27:28], v[5:6], off
+; GFX11-NEXT:    v_add_co_u32 v5, vcc_lo, 0x10000, v5
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
 ; GFX11-NEXT:    s_addk_i32 s2, 0x2000
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    s_cmp_gt_u32 s2, 0x3fffff
 ; GFX11-NEXT:    s_waitcnt vmcnt(10)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v13, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v14, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v13, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v14, v4, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(9)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v9, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v10, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v9, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v10, v4, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(6)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v7, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v8, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v7, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v8, v4, s0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v11, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v12, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v11, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v12, v4, s0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v19, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v20, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v19, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v20, v4, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(5)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v17, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v18, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v17, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v18, v4, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(4)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v15, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v16, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v15, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v16, v4, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(3)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v21, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v22, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v21, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v22, v4, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(2)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v23, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v24, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v23, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v24, v4, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(1)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v25, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v26, v3, s0
+; GFX11-NEXT:    v_add_co_u32 v3, s0, v25, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v26, v4, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_u32 v2, vcc_lo, v27, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v28, v3, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v3, vcc_lo, v27, v3
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v4, vcc_lo, v28, v4, vcc_lo
 ; GFX11-NEXT:    s_cbranch_scc0 .LBB1_2
 ; GFX11-NEXT:  ; %bb.3: ; %while.cond.loopexit
 ; GFX11-NEXT:    ; in Loop: Header=BB1_1 Depth=1
@@ -1009,10 +1009,10 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX11-NEXT:    s_mov_b32 s1, s0
 ; GFX11-NEXT:    s_branch .LBB1_1
 ; GFX11-NEXT:  .LBB1_5: ; %while.end
-; GFX11-NEXT:    v_add_co_u32 v0, s0, s34, v6
+; GFX11-NEXT:    v_add_co_u32 v0, s0, s34, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s35, 0, s0
-; GFX11-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT:    global_store_b64 v[0:1], v[3:4], off
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX11-NEXT:    s_endpgm
 entry:

diff  --git a/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-dbg.mir b/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-dbg.mir
index b4ed2e0444711..4e1f912a9d6f9 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-dbg.mir
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-dbg.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -start-before=rename-independent-subregs -stop-after=rewrite-partial-reg-uses %s -o - | FileCheck -check-prefix=CHECK %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -amdgpu-enable-rewrite-partial-reg-uses=true -verify-machineinstrs -start-before=rename-independent-subregs -stop-after=rewrite-partial-reg-uses %s -o - | FileCheck -check-prefix=CHECK %s
 --- |
   define void @test_vreg_96_w64() !dbg !5 {
   entry:

diff  --git a/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-gen.mir b/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-gen.mir
index 4e3b726e1de5e..9c042351bbcbe 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-gen.mir
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-gen.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -start-before=rename-independent-subregs -stop-after=rewrite-partial-reg-uses %s -o - | FileCheck -check-prefix=CHECK %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -amdgpu-enable-rewrite-partial-reg-uses=true -verify-machineinstrs -start-before=rename-independent-subregs -stop-after=rewrite-partial-reg-uses %s -o - | FileCheck -check-prefix=CHECK %s
 ---
 name: test_subregs_composition_vreg_1024
 tracksRegLiveness: true

diff  --git a/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses.mir b/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses.mir
index aaaa4fb995cf6..32e313c25fd23 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses.mir
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses.mir
@@ -1,6 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -start-before=rename-independent-subregs -stop-after=rewrite-partial-reg-uses %s -o - | FileCheck -check-prefix=CHECK %s
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -start-before=rename-independent-subregs %s -o /dev/null 2>&1
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -amdgpu-enable-rewrite-partial-reg-uses=true -verify-machineinstrs -start-before=rename-independent-subregs -stop-after=rewrite-partial-reg-uses %s -o - | FileCheck -check-prefix=CHECK %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -amdgpu-enable-rewrite-partial-reg-uses=true -verify-machineinstrs -start-before=rename-independent-subregs %s -o /dev/null 2>&1
 ---
 name: test_subregs_composition_vreg_1024
 tracksRegLiveness: true

diff  --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index e27b5f45a4475..0f58c6a96bf52 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -164,33 +164,33 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s12
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s13
-; GCN-IR-NEXT:    s_min_u32 s20, s8, s9
-; GCN-IR-NEXT:    s_sub_u32 s16, s14, s20
+; GCN-IR-NEXT:    s_min_u32 s18, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s16, s14, s18
 ; GCN-IR-NEXT:    s_subb_u32 s17, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[16:17], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[16:17], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[22:23], s[16:17], 63
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[10:11], s[18:19]
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[18:19], exec
+; GCN-IR-NEXT:    s_or_b64 s[20:21], s[10:11], s[20:21]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[20:21], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s13
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s12
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[22:23]
+; GCN-IR-NEXT:    s_or_b64 s[20:21], s[20:21], s[22:23]
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s18, s16, 1
-; GCN-IR-NEXT:    s_addc_u32 s19, s17, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
+; GCN-IR-NEXT:    s_add_u32 s20, s16, 1
+; GCN-IR-NEXT:    s_addc_u32 s21, s17, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[20:21], 0
 ; GCN-IR-NEXT:    s_sub_i32 s16, 63, s16
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], s16
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s18
-; GCN-IR-NEXT:    s_add_u32 s18, s6, -1
-; GCN-IR-NEXT:    s_addc_u32 s19, s7, -1
+; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s20
+; GCN-IR-NEXT:    s_add_u32 s19, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s20, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_add_u32 s12, s8, s20
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s18
 ; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
@@ -201,8 +201,8 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[14:15], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s8, s18, s16
-; GCN-IR-NEXT:    s_subb_u32 s8, s19, s17
+; GCN-IR-NEXT:    s_sub_u32 s8, s19, s16
+; GCN-IR-NEXT:    s_subb_u32 s8, s20, s17
 ; GCN-IR-NEXT:    s_ashr_i32 s14, s8, 31
 ; GCN-IR-NEXT:    s_mov_b32 s15, s14
 ; GCN-IR-NEXT:    s_and_b32 s8, s14, 1
@@ -211,9 +211,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_subb_u32 s17, s17, s15
 ; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
 ; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[22:23], s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[22:23]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow6
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[10:11], 1
@@ -362,85 +362,85 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_sdiv:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v12, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v12, v1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v12, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v13, v2
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v13, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v13
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v0
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v5, 31, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v4, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v10, vcc, v0, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v11, vcc, v1, v4, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v5, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v5
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v6
+; GCN-IR-NEXT:    v_min_u32_e32 v12, v2, v3
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v10
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v7
-; GCN-IR-NEXT:    v_min_u32_e32 v11, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v10, v11
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v11
+; GCN-IR-NEXT:    v_min_u32_e32 v13, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v12, v13
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[6:7], 0, 0, s[6:7]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[2:3]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v14, v12
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, v13
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v7, 0, s[4:5]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v6, 0, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v9, v11, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v8, v10, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[6:7], v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[14:15]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[10:11], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v5, v10
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v5, v11
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v4, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v9, v12
+; GCN-IR-NEXT:    v_lshr_b64 v[14:15], v[10:11], v14
+; GCN-IR-NEXT:    v_not_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, v9, v13
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v8, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_lshl_b64 v[14:15], v[14:15], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v8, 31, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v14, v14, v8
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v16, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v17, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v16, v14
+; GCN-IR-NEXT:    v_subb_u32_e32 v8, vcc, v17, v15, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v12, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v13, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e64 v14, s[4:5], v14, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[4:5], v15, v13, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v9
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v8
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
@@ -448,14 +448,14 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  .LBB1_5: ; %Flow3
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v9, v9, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v0
 ; GCN-IR-NEXT:  .LBB1_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v13, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v15, v14
-; GCN-IR-NEXT:    v_xor_b32_e32 v3, v4, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v2, v5, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v5, v4
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v7, v6
+; GCN-IR-NEXT:    v_xor_b32_e32 v3, v8, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v2, v9, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
@@ -1001,33 +1001,33 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 %
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s12
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s13
-; GCN-IR-NEXT:    s_min_u32 s20, s8, s9
-; GCN-IR-NEXT:    s_sub_u32 s16, s14, s20
+; GCN-IR-NEXT:    s_min_u32 s18, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s16, s14, s18
 ; GCN-IR-NEXT:    s_subb_u32 s17, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[16:17], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[16:17], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[22:23], s[16:17], 63
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[10:11], s[18:19]
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[18:19], exec
+; GCN-IR-NEXT:    s_or_b64 s[20:21], s[10:11], s[20:21]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[20:21], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s13
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s12
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[22:23]
+; GCN-IR-NEXT:    s_or_b64 s[20:21], s[20:21], s[22:23]
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s18, s16, 1
-; GCN-IR-NEXT:    s_addc_u32 s19, s17, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
+; GCN-IR-NEXT:    s_add_u32 s20, s16, 1
+; GCN-IR-NEXT:    s_addc_u32 s21, s17, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[20:21], 0
 ; GCN-IR-NEXT:    s_sub_i32 s16, 63, s16
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], s16
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s18
-; GCN-IR-NEXT:    s_add_u32 s18, s6, -1
-; GCN-IR-NEXT:    s_addc_u32 s19, s7, -1
+; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s20
+; GCN-IR-NEXT:    s_add_u32 s19, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s20, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_add_u32 s12, s8, s20
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s18
 ; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
@@ -1038,8 +1038,8 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 %
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[14:15], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s8, s18, s16
-; GCN-IR-NEXT:    s_subb_u32 s8, s19, s17
+; GCN-IR-NEXT:    s_sub_u32 s8, s19, s16
+; GCN-IR-NEXT:    s_subb_u32 s8, s20, s17
 ; GCN-IR-NEXT:    s_ashr_i32 s14, s8, 31
 ; GCN-IR-NEXT:    s_mov_b32 s15, s14
 ; GCN-IR-NEXT:    s_and_b32 s8, s14, 1
@@ -1048,9 +1048,9 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 %
 ; GCN-IR-NEXT:    s_subb_u32 s17, s17, s15
 ; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
 ; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[22:23], s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[22:23]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_3
 ; GCN-IR-NEXT:  .LBB9_4: ; %Flow3
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[10:11], 1
@@ -1206,32 +1206,32 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    s_min_u32 s14, s10, s11
-; GCN-IR-NEXT:    s_add_u32 s10, s14, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
+; GCN-IR-NEXT:    s_add_u32 s12, s10, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s13, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[10:11], 63
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[12:13]
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[12:13], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[12:13], 0
-; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
+; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_sub_i32 s11, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s10
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s11
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s12
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s14
 ; GCN-IR-NEXT:    s_add_u32 s16, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s10, 58, s14
+; GCN-IR-NEXT:    s_sub_u32 s10, 58, s10
 ; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -1385,87 +1385,87 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v12, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v12, v1
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v2, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 32, v4
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
-; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v5, vcc, s6, v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v6, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[2:3]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[5:6]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[5:6]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, 24, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, 24, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v5
+; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v5
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], 24, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], 24, v9
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 58, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB11_5: ; %Flow3
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v7, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v6, v0
 ; GCN-IR-NEXT:  .LBB11_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v7, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v4, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 24, %x
   ret i64 %result
@@ -1578,39 +1578,39 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v12, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v12, v1
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v2, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 32, v4
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
-; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v5, vcc, s6, v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v6, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0x8000
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[5:6]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[5:6]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v7, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v5
+; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v5
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[8:9], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
@@ -1618,50 +1618,50 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], s[4:5], v9
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 47, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB12_5: ; %Flow3
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v7, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v6, v0
 ; GCN-IR-NEXT:  .LBB12_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v7, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v4, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 32768, %x
   ret i64 %result
@@ -1681,84 +1681,84 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v10, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v10, v1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v10, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v0, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v2, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v7, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v8, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v0, v7
 ; GCN-IR-NEXT:    v_add_i32_e64 v0, s[4:5], 32, v0
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v1, v5
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v0, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v8
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v1, v8
+; GCN-IR-NEXT:    v_min_u32_e32 v0, v0, v1
+; GCN-IR-NEXT:    v_sub_i32_e64 v3, s[4:5], 48, v0
+; GCN-IR-NEXT:    v_subb_u32_e64 v4, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[3:4]
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, v2
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[3:4]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v5, 0, s[4:5]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v4, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v8, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v7, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 63, v0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v4, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v3, s[4:5], 63, v3
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
+; GCN-IR-NEXT:    v_lshl_b64 v[3:4], v[7:8], v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[4:5], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[9:10], v[7:8], v9
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 0xffffffcf, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v8, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s12, v6
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[9:10], v[9:10], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v9, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v5, vcc, s12, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, 0, v10, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v7
+; GCN-IR-NEXT:    v_lshl_b64 v[3:4], v[3:4], 1
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v9, 31, v5
+; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v8, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v5, 1, v9
+; GCN-IR-NEXT:    v_and_b32_e32 v9, 0x8000, v9
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v9, s[4:5], v0, v9
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v10, s[4:5], 0, v10, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB13_5: ; %Flow3
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v3, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v2, v0
+; GCN-IR-NEXT:    v_lshl_b64 v[3:4], v[3:4], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:  .LBB13_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v5, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v3, v6, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v3, v1, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 %x, 32768
   ret i64 %result

diff  --git a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
index 0a625ba2017c6..6da316db0f0e4 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
+++ b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
@@ -19,15 +19,17 @@ body:             |
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1786773504, implicit $exec
-  ; CHECK-NEXT:   dead [[V_MUL_F32_e32_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e32 0, [[V_MOV_B32_e32_]], implicit $mode, implicit $exec
-  ; CHECK-NEXT:   undef %0.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %0.sub1, implicit $mode, implicit $exec
+  ; CHECK-NEXT:   undef %5.sub1:vreg_64 = V_MOV_B32_e32 1786773504, implicit $exec
+  ; CHECK-NEXT:   dead [[V_MUL_F32_e32_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e32 0, %5.sub1, implicit $mode, implicit $exec
+  ; CHECK-NEXT:   undef %7.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %7.sub1, implicit $mode, implicit $exec
+  ; CHECK-NEXT:   SI_SPILL_V64_SAVE %7, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
-  ; CHECK-NEXT:   S_NOP 0, implicit %0.sub1
-  ; CHECK-NEXT:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1786773504, implicit $exec
-  ; CHECK-NEXT:   S_NOP 0, implicit [[V_MOV_B32_e32_1]]
-  ; CHECK-NEXT:   S_NOP 0, implicit undef %0.sub0
+  ; CHECK-NEXT:   undef %6.sub1:vreg_64 = V_MOV_B32_e32 1786773504, implicit $exec
+  ; CHECK-NEXT:   S_NOP 0, implicit %6.sub1
+  ; CHECK-NEXT:   [[SI_SPILL_V64_RESTORE:%[0-9]+]]:vreg_64 = SI_SPILL_V64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+  ; CHECK-NEXT:   S_NOP 0, implicit [[SI_SPILL_V64_RESTORE]].sub1
+  ; CHECK-NEXT:   S_NOP 0, implicit undef %9.sub0:vreg_64
   bb.0:
     successors: %bb.1
 
@@ -57,13 +59,13 @@ body:             |
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1786773504, implicit $exec
-  ; CHECK-NEXT:   undef %2.sub2:vreg_96 = V_MOV_B32_e32 0, implicit $exec
+  ; CHECK-NEXT:   undef %1.sub2:vreg_128 = V_MOV_B32_e32 1786773504, implicit $exec
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   S_NOP 0, implicit %1.sub2
+  ; CHECK-NEXT:   undef %2.sub2:vreg_128 = V_MOV_B32_e32 0, implicit $exec
   ; CHECK-NEXT:   S_NOP 0, implicit %2.sub2
-  ; CHECK-NEXT:   S_NOP 0, implicit [[V_MOV_B32_e32_]]
-  ; CHECK-NEXT:   S_NOP 0, implicit undef %2.sub0
+  ; CHECK-NEXT:   S_NOP 0, implicit undef %4.sub0:vreg_128
   bb.0:
     successors: %bb.1
 

diff  --git a/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll b/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll
index f1db395b35182..c056d35c56beb 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll
@@ -137,7 +137,7 @@ define amdgpu_kernel void @max_256_vgprs_spill_9x32(ptr addrspace(1) %p) #1 {
 
 ; GFX900: NumVgprs: 256
 ; GFX908: NumVgprs: 252
-; GFX900: ScratchSize: 132
+; GFX900: ScratchSize: 1668
 ; GFX908: ScratchSize: 0
 ; GFX900: VGPRBlocks: 63
 ; GFX908: VGPRBlocks: 62

diff  --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 9e01dcfaccc47..46befaae998b5 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -135,34 +135,34 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
-; GCN-IR-NEXT:    s_min_u32 s18, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT:    s_min_u32 s14, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[8:9], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[16:17], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_mov_b32 s11, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[16:17], 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s16
 ; GCN-IR-NEXT:    s_add_u32 s16, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s5, -1
 ; GCN-IR-NEXT:    s_not_b64 s[6:7], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s10, s6, s18
+; GCN-IR-NEXT:    s_add_u32 s10, s6, s14
 ; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -339,107 +339,107 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_srem:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v14
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
-; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v4
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v14, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v2, v4
-; GCN-IR-NEXT:    v_subb_u32_e32 v3, vcc, v3, v4, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
-; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
-; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v4
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
+; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v4, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v2, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v3, vcc, v3, v6, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v6, v2
+; GCN-IR-NEXT:    v_add_i32_e64 v6, s[6:7], 32, v6
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v7, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v6, v7
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v6, v0
+; GCN-IR-NEXT:    v_add_i32_e64 v6, s[6:7], 32, v6
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v7, v1
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v6, v7
+; GCN-IR-NEXT:    v_sub_i32_e64 v7, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    v_subb_u32_e64 v8, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[7:8]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[7:8]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, v14
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, v4
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v9, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v7
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v8, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 63, v7
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[12:13]
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v9, v10
+; GCN-IR-NEXT:    v_lshr_b64 v[12:13], v[0:1], v12
+; GCN-IR-NEXT:    v_not_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, v9, v11
+; GCN-IR-NEXT:    v_mov_b32_e32 v14, 0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v8, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v16, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v17, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
+; GCN-IR-NEXT:    v_lshl_b64 v[12:13], v[12:13], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v8, 31, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v12, v12, v8
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v16, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v8, vcc, v17, v13, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v14, v6
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v15, v7
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 1, v14
+; GCN-IR-NEXT:    v_and_b32_e32 v15, v14, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v14, v14, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e64 v12, s[4:5], v12, v14
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[4:5], v13, v15, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, v9
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v14, v8
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB1_5: ; %Flow3
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v9, v9, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v8, v6
 ; GCN-IR-NEXT:  .LBB1_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_mul_lo_u32 v4, v2, v7
-; GCN-IR-NEXT:    v_mul_hi_u32 v5, v2, v6
+; GCN-IR-NEXT:    v_mul_lo_u32 v7, v2, v9
+; GCN-IR-NEXT:    v_mul_hi_u32 v8, v2, v6
 ; GCN-IR-NEXT:    v_mul_lo_u32 v3, v3, v6
 ; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, v8, v7
+; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v7, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v15
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v15, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, %y
   ret i64 %result
@@ -1037,33 +1037,33 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
-; GCN-IR-NEXT:    s_min_u32 s20, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s14, s12, s20
+; GCN-IR-NEXT:    s_min_u32 s16, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s14, s12, s16
 ; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[16:17]
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[16:17], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[14:15], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[14:15], 63
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[10:11], s[18:19]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[18:19], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[20:21]
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s16, s14, 1
-; GCN-IR-NEXT:    s_addc_u32 s17, s15, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
+; GCN-IR-NEXT:    s_add_u32 s18, s14, 1
+; GCN-IR-NEXT:    s_addc_u32 s19, s15, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
 ; GCN-IR-NEXT:    s_sub_i32 s14, 63, s14
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[2:3], s14
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[2:3], s16
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[2:3], s18
 ; GCN-IR-NEXT:    s_add_u32 s18, s8, -1
 ; GCN-IR-NEXT:    s_addc_u32 s19, s9, -1
 ; GCN-IR-NEXT:    s_not_b64 s[6:7], s[12:13]
-; GCN-IR-NEXT:    s_add_u32 s12, s6, s20
+; GCN-IR-NEXT:    s_add_u32 s12, s6, s16
 ; GCN-IR-NEXT:    s_addc_u32 s13, s7, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -1188,33 +1188,33 @@ define amdgpu_kernel void @s_test_srem24_48(ptr addrspace(1) %out, i48 %x, i48 %
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s4
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s5
-; GCN-IR-NEXT:    s_min_u32 s20, s8, s9
-; GCN-IR-NEXT:    s_sub_u32 s14, s12, s20
+; GCN-IR-NEXT:    s_min_u32 s16, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s14, s12, s16
 ; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[16:17]
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[16:17], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[14:15], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[14:15], 63
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[10:11], s[18:19]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[18:19], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s5
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s4
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[20:21]
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s16, s14, 1
-; GCN-IR-NEXT:    s_addc_u32 s17, s15, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
+; GCN-IR-NEXT:    s_add_u32 s18, s14, 1
+; GCN-IR-NEXT:    s_addc_u32 s19, s15, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
 ; GCN-IR-NEXT:    s_sub_i32 s14, 63, s14
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[4:5], s14
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[4:5], s16
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[4:5], s18
 ; GCN-IR-NEXT:    s_add_u32 s18, s6, -1
 ; GCN-IR-NEXT:    s_addc_u32 s19, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[12:13]
-; GCN-IR-NEXT:    s_add_u32 s12, s8, s20
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s16
 ; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
@@ -1396,32 +1396,32 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s4
 ; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s5
-; GCN-IR-NEXT:    s_min_u32 s12, s2, s3
-; GCN-IR-NEXT:    s_add_u32 s2, s12, 0xffffffc5
+; GCN-IR-NEXT:    s_min_u32 s8, s2, s3
+; GCN-IR-NEXT:    s_add_u32 s2, s8, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[4:5], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[2:3], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[4:5], 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[2:3], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[2:3], 63
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[8:9], s[10:11]
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[10:11], exec
-; GCN-IR-NEXT:    s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
-; GCN-IR-NEXT:    s_mov_b32 s9, 0
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[10:11], s[12:13]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[12:13], exec
+; GCN-IR-NEXT:    s_cselect_b32 s10, 0, 24
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_mov_b32 s11, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s8, s2, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
+; GCN-IR-NEXT:    s_add_u32 s10, s2, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s3, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
 ; GCN-IR-NEXT:    s_sub_i32 s2, 63, s2
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], 24, s2
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s8
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
 ; GCN-IR-NEXT:    s_add_u32 s14, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s5, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -1448,13 +1448,13 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_3
 ; GCN-IR-NEXT:  .LBB10_4: ; %Flow5
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[2:3], 1
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[6:7], s[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[2:3]
 ; GCN-IR-NEXT:  .LBB10_5: ; %udiv-end
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
 ; GCN-IR-NEXT:    v_mul_hi_u32 v0, s4, v0
-; GCN-IR-NEXT:    s_mul_i32 s6, s4, s9
-; GCN-IR-NEXT:    s_mul_i32 s5, s5, s8
-; GCN-IR-NEXT:    s_mul_i32 s4, s4, s8
+; GCN-IR-NEXT:    s_mul_i32 s6, s4, s11
+; GCN-IR-NEXT:    s_mul_i32 s5, s5, s10
+; GCN-IR-NEXT:    s_mul_i32 s4, s4, s10
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s6, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, s5, v0
 ; GCN-IR-NEXT:    v_sub_i32_e64 v0, vcc, 24, s4
@@ -1582,25 +1582,25 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
-; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, s6, v6
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[2:3]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, 24, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, 24, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1610,8 +1610,8 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v7
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -1645,16 +1645,16 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB11_5: ; %Flow3
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[2:3], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v6
 ; GCN-IR-NEXT:  .LBB11_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v5
-; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v4
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GCN-IR-NEXT:    v_mul_hi_u32 v4, v0, v3
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
@@ -1773,27 +1773,27 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
-; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, s6, v6
+; GCN-IR-NEXT:    v_addc_u32_e64 v4, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0x8000
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[3:4]
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[3:4]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v5, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v4, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v3
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1804,8 +1804,8 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v7
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -1839,15 +1839,15 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB12_5: ; %Flow3
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[2:3], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v4, v6
 ; GCN-IR-NEXT:  .LBB12_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v5
-; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v4
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v5
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v5
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v5
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
@@ -1873,87 +1873,87 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
-; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v0
+; GCN-IR-NEXT:    v_add_i32_e64 v3, s[4:5], 32, v3
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v1
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v3, v4
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v1, 0, s[4:5]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v9
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, s12, v10
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v12, 0x8000, v12
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v11, s[4:5], 0, v11, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB13_5: ; %Flow3
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:  .LBB13_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[4:5], 15
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[6:7], 15
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, 32768
   ret i64 %result

diff  --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 9ebadb495841e..ba44b1c6a5c96 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -136,34 +136,34 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
-; GCN-IR-NEXT:    s_min_u32 s16, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s16
+; GCN-IR-NEXT:    s_min_u32 s14, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[8:9], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[16:17], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_mov_b32 s11, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[16:17], 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
-; GCN-IR-NEXT:    s_add_u32 s14, s4, -1
-; GCN-IR-NEXT:    s_addc_u32 s15, s5, -1
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s16
+; GCN-IR-NEXT:    s_add_u32 s15, s4, -1
+; GCN-IR-NEXT:    s_addc_u32 s16, s5, -1
 ; GCN-IR-NEXT:    s_not_b64 s[2:3], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s2, s2, s16
+; GCN-IR-NEXT:    s_add_u32 s2, s2, s14
 ; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -174,8 +174,8 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[6:7]
 ; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s6, s14, s12
-; GCN-IR-NEXT:    s_subb_u32 s6, s15, s13
+; GCN-IR-NEXT:    s_sub_u32 s6, s15, s12
+; GCN-IR-NEXT:    s_subb_u32 s6, s16, s13
 ; GCN-IR-NEXT:    s_ashr_i32 s10, s6, 31
 ; GCN-IR-NEXT:    s_mov_b32 s11, s10
 ; GCN-IR-NEXT:    s_and_b32 s6, s10, 1
@@ -184,9 +184,9 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_subb_u32 s13, s13, s11
 ; GCN-IR-NEXT:    s_add_u32 s2, s2, 1
 ; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[2:3], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow6
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[8:9], 1
@@ -319,12 +319,12 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[6:7], v10, v11
+; GCN-IR-NEXT:    v_min_u32_e32 v9, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[6:7], v8, v9
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
@@ -339,10 +339,10 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v7, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
@@ -351,38 +351,38 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v10
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v0, v10
+; GCN-IR-NEXT:    v_not_b32_e32 v0, v8
 ; GCN-IR-NEXT:    v_not_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v11
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v9
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v11, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v8, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v6
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v9, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v9, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v7
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
@@ -804,33 +804,33 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s8
 ; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s9
-; GCN-IR-NEXT:    s_min_u32 s16, s4, s5
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s16
+; GCN-IR-NEXT:    s_min_u32 s14, s4, s5
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[6:7], s[14:15]
-; GCN-IR-NEXT:    s_and_b64 s[6:7], s[14:15], exec
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[6:7], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 s[6:7], s[16:17], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s9
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s8
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[16:17], 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[8:9], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[8:9], s14
-; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
-; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[8:9], s16
+; GCN-IR-NEXT:    s_add_u32 s15, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s16, s3, -1
 ; GCN-IR-NEXT:    s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s8, s4, s16
+; GCN-IR-NEXT:    s_add_u32 s8, s4, s14
 ; GCN-IR-NEXT:    s_addc_u32 s9, s5, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -841,8 +841,8 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[6:7], s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
+; GCN-IR-NEXT:    s_sub_u32 s4, s15, s12
+; GCN-IR-NEXT:    s_subb_u32 s4, s16, s13
 ; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
 ; GCN-IR-NEXT:    s_mov_b32 s11, s10
 ; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
@@ -851,9 +851,9 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_subb_u32 s13, s13, s11
 ; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
 ; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_3
 ; GCN-IR-NEXT:  .LBB7_4: ; %Flow3
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[6:7], 1
@@ -989,32 +989,32 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s3
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    s_min_u32 s12, s8, s9
-; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s8, s8, s9
+; GCN-IR-NEXT:    s_add_u32 s10, s8, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[10:11]
-; GCN-IR-NEXT:    s_and_b64 s[6:7], s[10:11], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[6:7], s[12:13]
+; GCN-IR-NEXT:    s_and_b64 s[6:7], s[12:13], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
-; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
+; GCN-IR-NEXT:    s_sub_i32 s9, 63, s10
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s8
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s9
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s12
 ; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -1160,8 +1160,8 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffd0, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffd0, v6
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
@@ -1176,10 +1176,10 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1190,8 +1190,8 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v7
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -1251,8 +1251,8 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v8
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v6
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
@@ -1265,10 +1265,10 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1276,35 +1276,35 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[7:8], v[0:1], v7
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v7, v4
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v6
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v8, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v7, 31, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v7
+; GCN-IR-NEXT:    v_and_b32_e32 v7, 0x8000, v7
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v10, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v9, v2
+; GCN-IR-NEXT:    v_sub_i32_e64 v7, s[4:5], v6, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v5
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v8, s[4:5], 0, v8, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB10_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
@@ -1435,31 +1435,31 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
-; GCN-IR-NEXT:    s_min_u32 s12, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT:    s_min_u32 s10, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s8, 59, s10
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_add_u32 s12, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[2:3], s10
-; GCN-IR-NEXT:    s_add_u32 s2, s12, 0xffffffc4
+; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[2:3], s12
+; GCN-IR-NEXT:    s_add_u32 s2, s10, 0xffffffc4
 ; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -1602,8 +1602,8 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 59, v8
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 59, v6
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
@@ -1616,10 +1616,10 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1627,34 +1627,34 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc4, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[7:8], v[0:1], v7
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc4, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v7, v4
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v6
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v8, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v7, 31, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 24, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v7
+; GCN-IR-NEXT:    v_and_b32_e32 v7, 24, v7
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v10, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v9, v2
+; GCN-IR-NEXT:    v_sub_i32_e64 v7, s[4:5], v6, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v5
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v8, s[4:5], 0, v8, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow

diff  --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 5a7ef4517b13e..d6fcda0a02c6b 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -135,34 +135,34 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
-; GCN-IR-NEXT:    s_min_u32 s18, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT:    s_min_u32 s14, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[8:9], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[16:17], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_mov_b32 s11, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[16:17], 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s16
 ; GCN-IR-NEXT:    s_add_u32 s16, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s5, -1
 ; GCN-IR-NEXT:    s_not_b64 s[6:7], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s10, s6, s18
+; GCN-IR-NEXT:    s_add_u32 s10, s6, s14
 ; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -328,30 +328,30 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT:    v_min_u32_e32 v9, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v5, s[6:7], v8, v9
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    v_subb_u32_e64 v6, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[5:6]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[5:6]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[4:5]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v0, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v5
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v5
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
@@ -361,10 +361,10 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
+; GCN-IR-NEXT:    v_not_b32_e32 v7, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v10
 ; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v9
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -400,15 +400,15 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
 ; GCN-IR-NEXT:  .LBB1_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_mul_lo_u32 v4, v2, v7
-; GCN-IR-NEXT:    v_mul_hi_u32 v5, v2, v6
-; GCN-IR-NEXT:    v_mul_lo_u32 v3, v3, v6
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-IR-NEXT:    v_mul_lo_u32 v5, v2, v7
+; GCN-IR-NEXT:    v_mul_hi_u32 v6, v2, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, v3, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
@@ -817,32 +817,32 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s3
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    s_min_u32 s12, s8, s9
-; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s8, s8, s9
+; GCN-IR-NEXT:    s_add_u32 s10, s8, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[10:11]
-; GCN-IR-NEXT:    s_and_b64 s[6:7], s[10:11], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[6:7], s[12:13]
+; GCN-IR-NEXT:    s_and_b64 s[6:7], s[12:13], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
-; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
+; GCN-IR-NEXT:    s_sub_i32 s9, 63, s10
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s8
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s9
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s12
 ; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -1001,31 +1001,31 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
-; GCN-IR-NEXT:    s_min_u32 s12, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s8, 59, s12
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    s_min_u32 s8, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s10, 59, s8
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[10:11], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 63
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
-; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
+; GCN-IR-NEXT:    s_sub_i32 s9, 63, s10
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s8
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s9
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], s[2:3], s10
-; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc4
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], s[2:3], s12
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 0xffffffc4
 ; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -1178,26 +1178,26 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v10
-; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 0xffffffd0, v6
+; GCN-IR-NEXT:    v_addc_u32_e64 v4, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0x8000
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[3:4]
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[3:4]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v5, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v4, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v3
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1208,8 +1208,8 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v7
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -1243,15 +1243,15 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB8_5: ; %Flow3
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[2:3], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v4, v6
 ; GCN-IR-NEXT:  .LBB8_6: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v5
-; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v4
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v5
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v5
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v5
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
@@ -1275,8 +1275,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v6
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
@@ -1289,10 +1289,10 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1300,8 +1300,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v7
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0


        


More information about the llvm-commits mailing list