[llvm] 71ed66d - [AMDGPU] Make v4i64/v4f64/v8i64/v8f64 legal

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Tue May 12 16:05:23 PDT 2020


Author: Stanislav Mekhanoshin
Date: 2020-05-12T16:05:12-07:00
New Revision: 71ed66d97fd624313bef693fa9da54fa66bdcd09

URL: https://github.com/llvm/llvm-project/commit/71ed66d97fd624313bef693fa9da54fa66bdcd09
DIFF: https://github.com/llvm/llvm-project/commit/71ed66d97fd624313bef693fa9da54fa66bdcd09.diff

LOG: [AMDGPU] Make v4i64/v4f64/v8i64/v8f64 legal

We can produce such vectors in the Promote Alloca pass, but
without these types being legal we cannot use movrel to operate
on them and have to lower them via scratch instead. Making them
legal makes the SI_INDIRECT patterns work.
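
As an illustration, the double8_extelt test added below exercises
exactly this case: a dynamic extract from an <8 x double> vector
that now selects to v_movrels via m0 instead of a scratch
round-trip:

  define amdgpu_kernel void @double8_extelt(double addrspace(1)* %out, i32 %sel) {
  entry:
    %ext = extractelement <8 x double> <double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0, double 8.0>, i32 %sel
    store double %ext, double addrspace(1)* %out
    ret void
  }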

There is more work to do in subsequent changes:

1. We initialize m0 twice to access each dword. It should be
possible to do it only once and increment the base register
number instead (see the sketch after this list).
2. We also need v16i64/v16f64, but these types first need to be
added to tablegen.
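
For illustration (abbreviated from the dynamic_insertelement_v8f64
test updated below; register numbers are taken from that test),
inserting one 64-bit element currently writes m0 once per dword:

  s_lshl_b32 s4, s4, 1       ; dword index (2 dwords per double)
  s_mov_b32 m0, s4           ; index the low dword
  v_movreld_b32_e32 v0, 0
  s_or_b32 s4, s4, 1
  s_mov_b32 m0, s4           ; index the high dword
  v_movreld_b32_e32 v0, v16

Initializing m0 once and using the next VGPR as the movreld base
would eliminate the second copy.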

Differential Revision: https://reviews.llvm.org/D79808

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIInstructions.td
    llvm/lib/Target/AMDGPU/SIRegisterInfo.td
    llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll
    llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
    llvm/test/CodeGen/AMDGPU/idot2.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
    llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
    llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
    llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
    llvm/test/CodeGen/AMDGPU/saddo.ll
    llvm/test/CodeGen/AMDGPU/select.f16.ll
    llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
    llvm/test/CodeGen/AMDGPU/shift-i128.ll
    llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll
    llvm/test/CodeGen/AMDGPU/v_madak_f16.ll
    llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 014bb5a1b4ee..8f2823e7fdf8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -107,6 +107,18 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
   AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
 
+  setOperationAction(ISD::LOAD, MVT::v4i64, Promote);
+  AddPromotedToType(ISD::LOAD, MVT::v4i64, MVT::v8i32);
+
+  setOperationAction(ISD::LOAD, MVT::v4f64, Promote);
+  AddPromotedToType(ISD::LOAD, MVT::v4f64, MVT::v8i32);
+
+  setOperationAction(ISD::LOAD, MVT::v8i64, Promote);
+  AddPromotedToType(ISD::LOAD, MVT::v8i64, MVT::v16i32);
+
+  setOperationAction(ISD::LOAD, MVT::v8f64, Promote);
+  AddPromotedToType(ISD::LOAD, MVT::v8f64, MVT::v16i32);
+
   // There are no 64-bit extloads. These should be done as a 32-bit extload and
   // an extension to 64-bit.
   for (MVT VT : MVT::integer_valuetypes()) {
@@ -207,6 +219,18 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::STORE, MVT::v2f64, Promote);
   AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
 
+  setOperationAction(ISD::STORE, MVT::v4i64, Promote);
+  AddPromotedToType(ISD::STORE, MVT::v4i64, MVT::v8i32);
+
+  setOperationAction(ISD::STORE, MVT::v4f64, Promote);
+  AddPromotedToType(ISD::STORE, MVT::v4f64, MVT::v8i32);
+
+  setOperationAction(ISD::STORE, MVT::v8i64, Promote);
+  AddPromotedToType(ISD::STORE, MVT::v8i64, MVT::v16i32);
+
+  setOperationAction(ISD::STORE, MVT::v8f64, Promote);
+  AddPromotedToType(ISD::STORE, MVT::v8f64, MVT::v16i32);
+
   setTruncStoreAction(MVT::i64, MVT::i1, Expand);
   setTruncStoreAction(MVT::i64, MVT::i8, Expand);
   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
@@ -231,6 +255,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
   setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
   setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
 
+  setTruncStoreAction(MVT::v4i64, MVT::v4i32, Expand);
+  setTruncStoreAction(MVT::v4i64, MVT::v4i16, Expand);
   setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
   setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
 

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 9f155125266b..556cdff9fd49 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -148,9 +148,15 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
   addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
   addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
 
+  addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass);
+  addRegisterClass(MVT::v4f64, &AMDGPU::VReg_256RegClass);
+
   addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
   addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
 
+  addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass);
+  addRegisterClass(MVT::v8f64, &AMDGPU::VReg_512RegClass);
+
   if (Subtarget->has16BitInsts()) {
     addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
     addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);
@@ -233,6 +239,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
 
   setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
   setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
+  setOperationAction(ISD::TRUNCATE, MVT::v4i32, Expand);
+  setOperationAction(ISD::FP_ROUND, MVT::v4f32, Expand);
+  setOperationAction(ISD::TRUNCATE, MVT::v8i32, Expand);
+  setOperationAction(ISD::FP_ROUND, MVT::v8f32, Expand);
 
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
@@ -269,6 +279,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
   // with > 4 elements.
   for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                   MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
+                  MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64,
                   MVT::v32i32, MVT::v32f32 }) {
     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
       switch (Op) {
@@ -313,6 +324,34 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
   }
 
+  for (MVT Vec64 : { MVT::v4i64, MVT::v4f64 }) {
+    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
+    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32);
+
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
+    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v8i32);
+
+    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
+    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v8i32);
+
+    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
+    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v8i32);
+  }
+
+  for (MVT Vec64 : { MVT::v8i64, MVT::v8f64 }) {
+    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
+    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32);
+
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
+    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v16i32);
+
+    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
+    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v16i32);
+
+    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
+    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v16i32);
+  }
+
   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);

diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index ab6f51187a2f..5a5acf6577a0 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1177,10 +1177,31 @@ def : BitConvert <v8i32, v8f32, SReg_256>;
 def : BitConvert <v8f32, v8i32, SReg_256>;
 def : BitConvert <v8i32, v8f32, VReg_256>;
 def : BitConvert <v8f32, v8i32, VReg_256>;
+def : BitConvert <v4i64, v4f64, VReg_256>;
+def : BitConvert <v4f64, v4i64, VReg_256>;
+def : BitConvert <v4i64, v8i32, VReg_256>;
+def : BitConvert <v4i64, v8f32, VReg_256>;
+def : BitConvert <v4f64, v8i32, VReg_256>;
+def : BitConvert <v4f64, v8f32, VReg_256>;
+def : BitConvert <v8i32, v4i64, VReg_256>;
+def : BitConvert <v8f32, v4i64, VReg_256>;
+def : BitConvert <v8i32, v4f64, VReg_256>;
+def : BitConvert <v8f32, v4f64, VReg_256>;
+
 
 // 512-bit bitcast
 def : BitConvert <v16i32, v16f32, VReg_512>;
 def : BitConvert <v16f32, v16i32, VReg_512>;
+def : BitConvert <v8i64,  v8f64, VReg_512>;
+def : BitConvert <v8f64,  v8i64, VReg_512>;
+def : BitConvert <v8i64,  v16i32, VReg_512>;
+def : BitConvert <v8f64,  v16i32, VReg_512>;
+def : BitConvert <v16i32, v8i64,  VReg_512>;
+def : BitConvert <v16i32, v8f64,  VReg_512>;
+def : BitConvert <v8i64,  v16f32, VReg_512>;
+def : BitConvert <v8f64,  v16f32, VReg_512>;
+def : BitConvert <v16f32, v8i64,  VReg_512>;
+def : BitConvert <v16f32, v8f64,  VReg_512>;
 
 // 1024-bit bitcast
 def : BitConvert <v32i32, v32f32, VReg_1024>;

diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 5728a4df2995..60032c91fc75 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -736,32 +736,32 @@ def SReg_192 : RegisterClass<"AMDGPU", [untyped], 32, (add SGPR_192)> {
   let isAllocatable = 0;
 }
 
-def SGPR_256 : RegisterClass<"AMDGPU", [v8i32, v8f32, v4i64], 32, (add SGPR_256Regs)> {
+def SGPR_256 : RegisterClass<"AMDGPU", [v8i32, v8f32, v4i64, v4f64], 32, (add SGPR_256Regs)> {
   let AllocationPriority = 18;
 }
 
-def TTMP_256 : RegisterClass<"AMDGPU", [v8i32, v8f32], 32, (add TTMP_256Regs)> {
+def TTMP_256 : RegisterClass<"AMDGPU", [v8i32, v8f32, v4i64, v4f64], 32, (add TTMP_256Regs)> {
   let isAllocatable = 0;
 }
 
-def SReg_256 : RegisterClass<"AMDGPU", [v8i32, v8f32, v4i64], 32,
+def SReg_256 : RegisterClass<"AMDGPU", [v8i32, v8f32, v4i64, v4f64], 32,
                              (add SGPR_256, TTMP_256)> {
   // Requires 4 s_mov_b64 to copy
   let CopyCost = 4;
   let isAllocatable = 0;
 }
 
-def SGPR_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 32,
+def SGPR_512 : RegisterClass<"AMDGPU", [v16i32, v16f32, v8i64, v8f64], 32,
                              (add SGPR_512Regs)> {
   let AllocationPriority = 19;
 }
 
-def TTMP_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 32,
+def TTMP_512 : RegisterClass<"AMDGPU", [v16i32, v16f32, v8i64, v8f64], 32,
                              (add TTMP_512Regs)> {
   let isAllocatable = 0;
 }
 
-def SReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 32,
+def SReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32, v8i64, v8f64], 32,
                              (add SGPR_512, TTMP_512)> {
   // Requires 8 s_mov_b64 to copy
   let CopyCost = 8;
@@ -801,8 +801,8 @@ def VReg_96 : VRegClass<3, [v3i32, v3f32], (add VGPR_96)>;
 def VReg_128 : VRegClass<4, [v4i32, v4f32, v2i64, v2f64, i128], (add VGPR_128)>;
 def VReg_160 : VRegClass<5, [v5i32, v5f32], (add VGPR_160)>;
 def VReg_192 : VRegClass<6, [untyped], (add VGPR_192)>;
-def VReg_256 : VRegClass<8, [v8i32, v8f32], (add VGPR_256)>;
-def VReg_512 : VRegClass<16, [v16i32, v16f32], (add VGPR_512)>;
+def VReg_256 : VRegClass<8, [v8i32, v8f32, v4i64, v4f64], (add VGPR_256)>;
+def VReg_512 : VRegClass<16, [v16i32, v16f32, v8i64, v8f64], (add VGPR_512)>;
 def VReg_1024 : VRegClass<32, [v32i32, v32f32], (add VGPR_1024)>;
 
 class ARegClass<int numRegs, list<ValueType> regTypes, dag regList> :
@@ -817,8 +817,8 @@ def AReg_96 : ARegClass<3, [v3i32, v3f32], (add AGPR_96)>;
 def AReg_128 : ARegClass<4, [v4i32, v4f32, v2i64, v2f64], (add AGPR_128)>;
 def AReg_160 : ARegClass<5, [v5i32, v5f32], (add AGPR_160)>;
 def AReg_192 : ARegClass<6, [untyped], (add AGPR_192)>;
-def AReg_256 : ARegClass<8, [v8i32, v8f32], (add AGPR_256)>;
-def AReg_512 : ARegClass<16, [v16i32, v16f32], (add AGPR_512)>;
+def AReg_256 : ARegClass<8, [v8i32, v8f32, v4i64, v4f64], (add AGPR_256)>;
+def AReg_512 : ARegClass<16, [v16i32, v16f32, v8i64, v8f64], (add AGPR_512)>;
 def AReg_1024 : ARegClass<32, [v32i32, v32f32], (add AGPR_1024)>;
 
 } // End GeneratePressureSet = 0

diff --git a/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll b/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll
index 29ab79ddf201..0306177f6423 100644
--- a/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll
+++ b/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll
@@ -51,22 +51,24 @@ define amdgpu_kernel void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x
 ; SI-LABEL: test_copy_v4i8_x2:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; SI-NEXT:    s_mov_b32 s11, 0xf000
-; SI-NEXT:    s_mov_b32 s2, 0
-; SI-NEXT:    s_mov_b32 s3, s11
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s10, 0
+; SI-NEXT:    s_mov_b32 s11, s3
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
-; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_mov_b32 s8, s6
-; SI-NEXT:    s_mov_b32 s9, s7
-; SI-NEXT:    s_mov_b32 s6, s10
-; SI-NEXT:    s_mov_b32 s7, s11
+; SI-NEXT:    buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_mov_b32 s12, s6
+; SI-NEXT:    s_mov_b32 s13, s7
+; SI-NEXT:    s_mov_b32 s14, s2
+; SI-NEXT:    s_mov_b32 s15, s3
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
-; SI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[12:15], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_copy_v4i8_x2:
@@ -77,17 +79,19 @@ define amdgpu_kernel void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s8, s6
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; VI-NEXT:    flat_load_dword v0, v[0:1]
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
-; VI-NEXT:    s_mov_b32 s6, s2
-; VI-NEXT:    s_mov_b32 s7, s3
+; VI-NEXT:    s_mov_b32 s0, s4
+; VI-NEXT:    s_mov_b32 s1, s5
+; VI-NEXT:    s_mov_b32 s9, s7
+; VI-NEXT:    s_mov_b32 s10, s2
+; VI-NEXT:    s_mov_b32 s11, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
 ; VI-NEXT:    s_endpgm
   %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
   %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
@@ -102,26 +106,28 @@ define amdgpu_kernel void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
-; SI-NEXT:    s_mov_b32 s18, 0
-; SI-NEXT:    s_mov_b32 s19, s11
+; SI-NEXT:    s_mov_b32 s22, 0
+; SI-NEXT:    s_mov_b32 s23, s11
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b64 s[16:17], s[6:7]
+; SI-NEXT:    s_mov_b64 s[20:21], s[6:7]
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
-; SI-NEXT:    buffer_load_dword v0, v[0:1], s[16:19], 0 addr64
+; SI-NEXT:    buffer_load_dword v0, v[0:1], s[20:23], 0 addr64
 ; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s8, s0
+; SI-NEXT:    s_mov_b32 s9, s1
 ; SI-NEXT:    s_mov_b32 s12, s2
 ; SI-NEXT:    s_mov_b32 s13, s3
-; SI-NEXT:    s_mov_b32 s2, s10
-; SI-NEXT:    s_mov_b32 s3, s11
-; SI-NEXT:    s_mov_b32 s8, s4
-; SI-NEXT:    s_mov_b32 s9, s5
 ; SI-NEXT:    s_mov_b32 s14, s10
 ; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_mov_b32 s16, s4
+; SI-NEXT:    s_mov_b32 s17, s5
+; SI-NEXT:    s_mov_b32 s18, s10
+; SI-NEXT:    s_mov_b32 s19, s11
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
-; SI-NEXT:    buffer_store_dword v0, off, s[12:15], 0
 ; SI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[12:15], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[16:19], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_copy_v4i8_x3:
@@ -136,17 +142,19 @@ define amdgpu_kernel void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x
 ; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v0
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b32 s8, s0
+; VI-NEXT:    s_mov_b32 s9, s1
 ; VI-NEXT:    s_mov_b32 s12, s2
 ; VI-NEXT:    s_mov_b32 s13, s3
-; VI-NEXT:    s_mov_b32 s2, s10
-; VI-NEXT:    s_mov_b32 s3, s11
-; VI-NEXT:    s_mov_b32 s8, s4
-; VI-NEXT:    s_mov_b32 s9, s5
 ; VI-NEXT:    s_mov_b32 s15, s11
+; VI-NEXT:    s_mov_b32 s16, s4
+; VI-NEXT:    s_mov_b32 s17, s5
+; VI-NEXT:    s_mov_b32 s18, s10
+; VI-NEXT:    s_mov_b32 s19, s11
 ; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
-; VI-NEXT:    buffer_store_dword v0, off, s[12:15], 0
 ; VI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[12:15], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[16:19], 0
 ; VI-NEXT:    s_endpgm
   %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
   %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
@@ -170,50 +178,58 @@ define amdgpu_kernel void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    buffer_load_dword v0, v[0:1], s[12:15], 0 addr64
 ; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_mov_b32 s20, s8
+; SI-NEXT:    s_mov_b32 s21, s9
+; SI-NEXT:    s_mov_b32 s8, s10
+; SI-NEXT:    s_mov_b32 s9, s11
 ; SI-NEXT:    s_mov_b32 s16, s6
 ; SI-NEXT:    s_mov_b32 s17, s7
-; SI-NEXT:    s_mov_b32 s6, s2
-; SI-NEXT:    s_mov_b32 s7, s3
-; SI-NEXT:    s_mov_b32 s0, s10
-; SI-NEXT:    s_mov_b32 s1, s11
-; SI-NEXT:    s_mov_b32 s10, s2
-; SI-NEXT:    s_mov_b32 s11, s3
 ; SI-NEXT:    s_mov_b32 s18, s2
 ; SI-NEXT:    s_mov_b32 s19, s3
+; SI-NEXT:    s_mov_b32 s22, s2
+; SI-NEXT:    s_mov_b32 s23, s3
+; SI-NEXT:    s_mov_b32 s10, s2
+; SI-NEXT:    s_mov_b32 s11, s3
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    buffer_store_dword v0, off, s[16:19], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[20:23], 0
 ; SI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
-; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_copy_v4i8_x4:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x44
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x44
 ; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_mov_b32 s0, s10
-; VI-NEXT:    v_mov_b32_e32 v1, s13
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s12, v0
+; VI-NEXT:    s_mov_b32 s16, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_mov_b32 s0, s4
+; VI-NEXT:    s_mov_b32 s1, s5
+; VI-NEXT:    s_mov_b32 s17, s9
+; VI-NEXT:    s_mov_b32 s8, s10
+; VI-NEXT:    s_mov_b32 s9, s11
 ; VI-NEXT:    s_mov_b32 s12, s6
 ; VI-NEXT:    s_mov_b32 s13, s7
-; VI-NEXT:    s_mov_b32 s6, s2
-; VI-NEXT:    s_mov_b32 s7, s3
-; VI-NEXT:    s_mov_b32 s1, s11
-; VI-NEXT:    s_mov_b32 s10, s2
-; VI-NEXT:    s_mov_b32 s11, s3
 ; VI-NEXT:    s_mov_b32 s14, s2
 ; VI-NEXT:    s_mov_b32 s15, s3
+; VI-NEXT:    s_mov_b32 s18, s2
+; VI-NEXT:    s_mov_b32 s19, s3
+; VI-NEXT:    s_mov_b32 s10, s2
+; VI-NEXT:    s_mov_b32 s11, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT:    buffer_store_dword v0, off, s[12:15], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[16:19], 0
 ; VI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
-; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
   %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
@@ -239,9 +255,11 @@ define amdgpu_kernel void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0
 ; SI-NEXT:    buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
 ; SI-NEXT:    s_mov_b32 s12, 0xff00
 ; SI-NEXT:    s_movk_i32 s13, 0xff
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
 ; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s6
-; SI-NEXT:    s_mov_b32 s1, s7
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
 ; SI-NEXT:    s_mov_b32 s6, s2
 ; SI-NEXT:    s_mov_b32 s7, s3
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -259,8 +277,8 @@ define amdgpu_kernel void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 0x9000000, v1
-; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
-; SI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_copy_v4i8_extra_use:
@@ -270,16 +288,18 @@ define amdgpu_kernel void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0
 ; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
 ; VI-NEXT:    s_movk_i32 s10, 0x900
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s9
 ; VI-NEXT:    v_add_u32_e32 v0, vcc, s8, v0
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; VI-NEXT:    flat_load_dword v0, v[0:1]
 ; VI-NEXT:    s_movk_i32 s8, 0xff00
 ; VI-NEXT:    s_movk_i32 s9, 0xff
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
+; VI-NEXT:    s_mov_b32 s1, s5
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s6, s2
 ; VI-NEXT:    s_mov_b32 s7, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -296,8 +316,8 @@ define amdgpu_kernel void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0
 ; VI-NEXT:    v_add_u16_e32 v2, s10, v2
 ; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; VI-NEXT:    v_or_b32_e32 v1, v2, v1
-; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
-; VI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
   %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
@@ -312,44 +332,46 @@ define amdgpu_kernel void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0
 define amdgpu_kernel void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
 ; SI-LABEL: test_copy_v4i8_x2_extra_use:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x9
-; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s18, 0
-; SI-NEXT:    s_mov_b32 s19, s3
+; SI-NEXT:    s_mov_b32 s19, s11
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b64 s[16:17], s[10:11]
+; SI-NEXT:    s_mov_b64 s[16:17], s[6:7]
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    buffer_load_dword v0, v[0:1], s[16:19], 0 addr64
-; SI-NEXT:    s_mov_b32 s0, s8
-; SI-NEXT:    s_mov_b32 s1, s9
-; SI-NEXT:    s_mov_b32 s8, 0xff00
-; SI-NEXT:    s_movk_i32 s9, 0xff
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
-; SI-NEXT:    s_mov_b32 s14, s2
-; SI-NEXT:    s_mov_b32 s15, s3
-; SI-NEXT:    s_mov_b32 s6, s2
-; SI-NEXT:    s_mov_b32 s7, s3
+; SI-NEXT:    s_mov_b32 s12, s4
+; SI-NEXT:    s_mov_b32 s13, s5
+; SI-NEXT:    s_mov_b32 s4, 0xff00
+; SI-NEXT:    s_movk_i32 s5, 0xff
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s8, s0
+; SI-NEXT:    s_mov_b32 s9, s1
+; SI-NEXT:    s_mov_b32 s0, s2
+; SI-NEXT:    s_mov_b32 s1, s3
+; SI-NEXT:    s_mov_b32 s2, s10
+; SI-NEXT:    s_mov_b32 s3, s11
+; SI-NEXT:    s_mov_b32 s14, s10
+; SI-NEXT:    s_mov_b32 s15, s11
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_add_i32_e32 v3, vcc, 9, v0
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
-; SI-NEXT:    v_and_b32_e32 v4, s8, v1
+; SI-NEXT:    v_and_b32_e32 v4, s4, v1
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 9, v1
-; SI-NEXT:    v_and_b32_e32 v2, s8, v0
-; SI-NEXT:    v_and_b32_e32 v3, s9, v3
+; SI-NEXT:    v_and_b32_e32 v2, s4, v0
+; SI-NEXT:    v_and_b32_e32 v3, s5, v3
 ; SI-NEXT:    v_or_b32_e32 v2, v2, v3
-; SI-NEXT:    v_and_b32_e32 v1, s9, v1
+; SI-NEXT:    v_and_b32_e32 v1, s5, v1
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, 0x900, v2
 ; SI-NEXT:    v_or_b32_e32 v1, v4, v1
 ; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v1, vcc, 0x9000000, v1
-; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
-; SI-NEXT:    buffer_store_dword v1, off, s[12:15], 0
-; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[12:15], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_copy_v4i8_x2_extra_use:
@@ -364,16 +386,18 @@ define amdgpu_kernel void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %o
 ; VI-NEXT:    v_mov_b32_e32 v1, s7
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; VI-NEXT:    flat_load_dword v0, v[0:1]
-; VI-NEXT:    s_mov_b32 s8, s4
+; VI-NEXT:    s_mov_b32 s12, s4
 ; VI-NEXT:    s_movk_i32 s4, 0xff00
-; VI-NEXT:    s_mov_b32 s9, s5
+; VI-NEXT:    s_mov_b32 s13, s5
 ; VI-NEXT:    s_movk_i32 s5, 0xff
 ; VI-NEXT:    s_movk_i32 s6, 0x900
-; VI-NEXT:    s_mov_b32 s12, s2
-; VI-NEXT:    s_mov_b32 s13, s3
-; VI-NEXT:    s_mov_b32 s15, s11
+; VI-NEXT:    s_mov_b32 s8, s0
+; VI-NEXT:    s_mov_b32 s9, s1
+; VI-NEXT:    s_mov_b32 s0, s2
+; VI-NEXT:    s_mov_b32 s1, s3
 ; VI-NEXT:    s_mov_b32 s2, s10
 ; VI-NEXT:    s_mov_b32 s3, s11
+; VI-NEXT:    s_mov_b32 s15, s11
 ; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
 ; VI-NEXT:    v_and_b32_e32 v4, s4, v1
@@ -388,9 +412,9 @@ define amdgpu_kernel void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %o
 ; VI-NEXT:    v_add_u16_e32 v2, s6, v2
 ; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; VI-NEXT:    v_or_b32_e32 v1, v2, v1
-; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
-; VI-NEXT:    buffer_store_dword v1, off, s[12:15], 0
 ; VI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; VI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[12:15], 0
 ; VI-NEXT:    s_endpgm
   %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
   %in.ptr = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x

diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
index eb95efd18609..24d798503b24 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
@@ -8,7 +8,7 @@
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1.0, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], 2.0, [[V1]], [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], 4.0, [[V2]], [[C3]]
-; GCN: store_dword v[{{[0-9:]+}}], [[V3]]
+; GCN:     store_dword v[{{[0-9:]+}}], [[V3]]
 define amdgpu_kernel void @float4_extelt(float addrspace(1)* %out, i32 %sel) {
 entry:
   %ext = extractelement <4 x float> <float 0.0, float 1.0, float 2.0, float 4.0>, i32 %sel
@@ -160,6 +160,40 @@ entry:
   ret void
 }
 
+; TODO: Should be able to copy to m0 only once and increment base instead.
+
+; GCN-LABEL: {{^}}double8_extelt:
+; GCN-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0
+; GCN-DAG: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ZERO]]
+; GCN-DAG: s_mov_b32 m0, [[IND0:s[0-9]+]]
+; GCN-DAG: s_or_b32 [[IND1:s[0-9]+]], [[IND0]], 1
+; GCN-DAG: v_movrels_b32_e32 v[[RES_LO:[0-9]+]], [[BASE]]
+; GCN:     s_mov_b32 m0, [[IND1:s[0-9]+]]
+; GCN:     v_movrels_b32_e32 v[[RES_HI:[0-9]+]], [[BASE]]
+; GCN:     store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[RES_LO]]:[[RES_HI]]]
+define amdgpu_kernel void @double8_extelt(double addrspace(1)* %out, i32 %sel) {
+entry:
+  %ext = extractelement <8 x double> <double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0, double 8.0>, i32 %sel
+  store double %ext, double addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}double7_extelt:
+; GCN-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0
+; GCN-DAG: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ZERO]]
+; GCN-DAG: s_mov_b32 m0, [[IND0:s[0-9]+]]
+; GCN-DAG: s_or_b32 [[IND1:s[0-9]+]], [[IND0]], 1
+; GCN-DAG: v_movrels_b32_e32 v[[RES_LO:[0-9]+]], [[BASE]]
+; GCN:     s_mov_b32 m0, [[IND1:s[0-9]+]]
+; GCN:     v_movrels_b32_e32 v[[RES_HI:[0-9]+]], [[BASE]]
+; GCN:     store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[RES_LO]]:[[RES_HI]]]
+define amdgpu_kernel void @double7_extelt(double addrspace(1)* %out, i32 %sel) {
+entry:
+  %ext = extractelement <7 x double> <double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0>, i32 %sel
+  store double %ext, double addrspace(1)* %out
+  ret void
+}
+
 ; GCN-LABEL: {{^}}float16_extelt:
 ; GCN-NOT: buffer_
 ; GCN-DAG: s_mov_b32 m0,

diff --git a/llvm/test/CodeGen/AMDGPU/idot2.ll b/llvm/test/CodeGen/AMDGPU/idot2.ll
index 2844898bf2f3..42921dc8c2ba 100644
--- a/llvm/test/CodeGen/AMDGPU/idot2.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot2.ll
@@ -2674,19 +2674,21 @@ define amdgpu_kernel void @notsdot2_sext8(<2 x i8> addrspace(1)* %src1,
 ; GFX7-LABEL: notsdot2_sext8:
 ; GFX7:       ; %bb.0: ; %entry
 ; GFX7-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s2, -1
 ; GFX7-NEXT:    s_mov_b32 s10, s2
 ; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-NEXT:    s_mov_b32 s8, s6
-; GFX7-NEXT:    s_mov_b32 s9, s7
+; GFX7-NEXT:    s_mov_b32 s0, s4
+; GFX7-NEXT:    s_mov_b32 s1, s5
+; GFX7-NEXT:    s_mov_b32 s4, s6
+; GFX7-NEXT:    s_mov_b32 s5, s7
 ; GFX7-NEXT:    s_mov_b32 s6, s2
 ; GFX7-NEXT:    s_mov_b32 s7, s3
+; GFX7-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
+; GFX7-NEXT:    buffer_load_ushort v1, off, s[4:7], 0
+; GFX7-NEXT:    s_load_dword s0, s[8:9], 0x0
 ; GFX7-NEXT:    s_mov_b32 s11, s3
-; GFX7-NEXT:    buffer_load_ushort v0, off, s[4:7], 0
-; GFX7-NEXT:    buffer_load_ushort v1, off, s[8:11], 0
-; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x0
 ; GFX7-NEXT:    s_waitcnt vmcnt(1)
 ; GFX7-NEXT:    v_bfe_i32 v2, v0, 0, 8
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
@@ -2694,9 +2696,9 @@ define amdgpu_kernel void @notsdot2_sext8(<2 x i8> addrspace(1)* %src1,
 ; GFX7-NEXT:    v_bfe_i32 v0, v0, 8, 8
 ; GFX7-NEXT:    v_bfe_i32 v1, v1, 8, 8
 ; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-NEXT:    v_mad_i32_i24 v0, v1, v0, s4
+; GFX7-NEXT:    v_mad_i32_i24 v0, v1, v0, s0
 ; GFX7-NEXT:    v_mad_i32_i24 v0, v3, v2, v0
-; GFX7-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT:    buffer_store_dword v0, off, s[8:11], 0
 ; GFX7-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: notsdot2_sext8:
@@ -2704,23 +2706,23 @@ define amdgpu_kernel void @notsdot2_sext8(<2 x i8> addrspace(1)* %src1,
 ; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8-NEXT:    v_mov_b32_e32 v1, s7
-; GFX8-NEXT:    v_mov_b32_e32 v2, s4
-; GFX8-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8-NEXT:    v_mov_b32_e32 v3, s7
+; GFX8-NEXT:    flat_load_ushort v2, v[2:3]
 ; GFX8-NEXT:    flat_load_ushort v0, v[0:1]
-; GFX8-NEXT:    flat_load_ushort v1, v[2:3]
 ; GFX8-NEXT:    s_load_dword s2, s[0:1], 0x0
 ; GFX8-NEXT:    s_waitcnt vmcnt(1) lgkmcnt(0)
-; GFX8-NEXT:    v_bfe_i32 v3, v0, 0, 8
-; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 8, v0
+; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX8-NEXT:    v_lshrrev_b16_e32 v2, 8, v2
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_bfe_i32 v2, v1, 0, 8
-; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 8, v1
-; GFX8-NEXT:    v_bfe_i32 v1, v1, 0, 8
+; GFX8-NEXT:    v_bfe_i32 v1, v0, 0, 8
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 8, v0
 ; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 8
-; GFX8-NEXT:    v_mad_i32_i24 v0, v0, v1, s2
-; GFX8-NEXT:    v_mad_i32_i24 v2, v3, v2, v0
+; GFX8-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX8-NEXT:    v_mad_i32_i24 v0, v2, v0, s2
+; GFX8-NEXT:    v_mad_i32_i24 v2, v3, v1, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    flat_store_dword v[0:1], v2
@@ -2731,24 +2733,24 @@ define amdgpu_kernel void @notsdot2_sext8(<2 x i8> addrspace(1)* %src1,
 ; GFX9-NODL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX9-NODL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX9-NODL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NODL-NEXT:    v_mov_b32_e32 v0, s6
-; GFX9-NODL-NEXT:    v_mov_b32_e32 v1, s7
-; GFX9-NODL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NODL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-NODL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NODL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NODL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9-NODL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX9-NODL-NEXT:    global_load_ushort v2, v[2:3], off
 ; GFX9-NODL-NEXT:    global_load_ushort v0, v[0:1], off
-; GFX9-NODL-NEXT:    global_load_ushort v1, v[2:3], off
 ; GFX9-NODL-NEXT:    s_load_dword s2, s[0:1], 0x0
 ; GFX9-NODL-NEXT:    s_waitcnt vmcnt(1)
-; GFX9-NODL-NEXT:    v_bfe_i32 v3, v0, 0, 8
-; GFX9-NODL-NEXT:    v_lshrrev_b16_e32 v0, 8, v0
+; GFX9-NODL-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX9-NODL-NEXT:    v_lshrrev_b16_e32 v2, 8, v2
 ; GFX9-NODL-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NODL-NEXT:    v_bfe_i32 v2, v1, 0, 8
-; GFX9-NODL-NEXT:    v_lshrrev_b16_e32 v1, 8, v1
-; GFX9-NODL-NEXT:    v_bfe_i32 v1, v1, 0, 8
+; GFX9-NODL-NEXT:    v_bfe_i32 v1, v0, 0, 8
+; GFX9-NODL-NEXT:    v_lshrrev_b16_e32 v0, 8, v0
 ; GFX9-NODL-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX9-NODL-NEXT:    v_bfe_i32 v2, v2, 0, 8
 ; GFX9-NODL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NODL-NEXT:    v_mad_i32_i24 v0, v0, v1, s2
-; GFX9-NODL-NEXT:    v_mad_i32_i24 v2, v3, v2, v0
+; GFX9-NODL-NEXT:    v_mad_i32_i24 v0, v2, v0, s2
+; GFX9-NODL-NEXT:    v_mad_i32_i24 v2, v3, v1, v0
 ; GFX9-NODL-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NODL-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-NODL-NEXT:    global_store_dword v[0:1], v2, off
@@ -2759,24 +2761,24 @@ define amdgpu_kernel void @notsdot2_sext8(<2 x i8> addrspace(1)* %src1,
 ; GFX9-DL-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX9-DL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX9-DL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-DL-NEXT:    v_mov_b32_e32 v0, s6
-; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s7
-; GFX9-DL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-DL-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-DL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-DL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9-DL-NEXT:    v_mov_b32_e32 v3, s7
+; GFX9-DL-NEXT:    global_load_ushort v2, v[2:3], off
 ; GFX9-DL-NEXT:    global_load_ushort v0, v[0:1], off
-; GFX9-DL-NEXT:    global_load_ushort v1, v[2:3], off
 ; GFX9-DL-NEXT:    s_load_dword s2, s[0:1], 0x0
 ; GFX9-DL-NEXT:    s_waitcnt vmcnt(1)
-; GFX9-DL-NEXT:    v_bfe_i32 v3, v0, 0, 8
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v0, 8, v0
+; GFX9-DL-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v2, 8, v2
 ; GFX9-DL-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-DL-NEXT:    v_bfe_i32 v2, v1, 0, 8
-; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v1, 8, v1
-; GFX9-DL-NEXT:    v_bfe_i32 v1, v1, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v1, v0, 0, 8
+; GFX9-DL-NEXT:    v_lshrrev_b16_e32 v0, 8, v0
 ; GFX9-DL-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX9-DL-NEXT:    v_bfe_i32 v2, v2, 0, 8
 ; GFX9-DL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-DL-NEXT:    v_mad_i32_i24 v0, v0, v1, s2
-; GFX9-DL-NEXT:    v_mad_i32_i24 v2, v3, v2, v0
+; GFX9-DL-NEXT:    v_mad_i32_i24 v0, v2, v0, s2
+; GFX9-DL-NEXT:    v_mad_i32_i24 v2, v3, v1, v0
 ; GFX9-DL-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-DL-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-DL-NEXT:    global_store_dword v[0:1], v2, off
@@ -2788,24 +2790,24 @@ define amdgpu_kernel void @notsdot2_sext8(<2 x i8> addrspace(1)* %src1,
 ; GFX10-DL-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX10-DL-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-DL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-DL-NEXT:    v_mov_b32_e32 v2, s4
-; GFX10-DL-NEXT:    v_mov_b32_e32 v3, s5
-; GFX10-DL-NEXT:    v_mov_b32_e32 v0, s6
-; GFX10-DL-NEXT:    v_mov_b32_e32 v1, s7
-; GFX10-DL-NEXT:    global_load_ushort v2, v[2:3], off
+; GFX10-DL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX10-DL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX10-DL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX10-DL-NEXT:    v_mov_b32_e32 v3, s7
 ; GFX10-DL-NEXT:    global_load_ushort v0, v[0:1], off
+; GFX10-DL-NEXT:    global_load_ushort v1, v[2:3], off
 ; GFX10-DL-NEXT:    s_load_dword s2, s[0:1], 0x0
 ; GFX10-DL-NEXT:    s_waitcnt vmcnt(1)
-; GFX10-DL-NEXT:    v_lshrrev_b16_e64 v1, 8, v2
+; GFX10-DL-NEXT:    v_lshrrev_b16_e64 v2, 8, v0
 ; GFX10-DL-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-DL-NEXT:    v_lshrrev_b16_e64 v3, 8, v0
-; GFX10-DL-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX10-DL-NEXT:    v_lshrrev_b16_e64 v3, 8, v1
 ; GFX10-DL-NEXT:    v_bfe_i32 v0, v0, 0, 8
 ; GFX10-DL-NEXT:    v_bfe_i32 v1, v1, 0, 8
+; GFX10-DL-NEXT:    v_bfe_i32 v2, v2, 0, 8
 ; GFX10-DL-NEXT:    v_bfe_i32 v3, v3, 0, 8
 ; GFX10-DL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-DL-NEXT:    v_mad_i32_i24 v1, v3, v1, s2
-; GFX10-DL-NEXT:    v_mad_i32_i24 v2, v0, v2, v1
+; GFX10-DL-NEXT:    v_mad_i32_i24 v2, v3, v2, s2
+; GFX10-DL-NEXT:    v_mad_i32_i24 v2, v1, v0, v2
 ; GFX10-DL-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX10-DL-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX10-DL-NEXT:    global_store_dword v[0:1], v2, off

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
index b311b6aa29d7..2eb8151d9c85 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
@@ -268,26 +268,16 @@ entry:
   ret void
 }
 
+; TODO: We should be able not to write to m0 twice and just increment base.
+
 ; GCN-LABEL: {{^}}double8_inselt:
 ; GCN-NOT: v_cndmask
-; GCN: buffer_store_dword
-; GCN: buffer_store_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
-; GCN: buffer_load_dword
+; GCN-NOT: buffer_
+; GCN-DAG: s_or_b32 [[IND1:s[0-9]+]], [[IND0:s[0-9]+]], 1
+; GCN-DAG: s_mov_b32 m0, [[IND0]]
+; GCN-DAG: v_movreld_b32_e32 [[BASE:v[0-9]+]],
+; GCN:     s_mov_b32 m0, [[IND1]]
+; GCN:     v_movreld_b32_e32 [[BASE]]
 define amdgpu_kernel void @double8_inselt(<8 x double> addrspace(1)* %out, <8 x double> %vec, i32 %sel) {
 entry:
   %v = insertelement <8 x double> %vec, double 1.000000e+00, i32 %sel
@@ -295,6 +285,21 @@ entry:
   ret void
 }
 
+; GCN-LABEL: {{^}}double7_inselt:
+; GCN-NOT: v_cndmask
+; GCN-NOT: buffer_
+; GCN-DAG: s_or_b32 [[IND1:s[0-9]+]], [[IND0:s[0-9]+]], 1
+; GCN-DAG: s_mov_b32 m0, [[IND0]]
+; GCN-DAG: v_movreld_b32_e32 [[BASE:v[0-9]+]],
+; GCN:     s_mov_b32 m0, [[IND1]]
+; GCN:     v_movreld_b32_e32 [[BASE]]
+define amdgpu_kernel void @double7_inselt(<7 x double> addrspace(1)* %out, <7 x double> %vec, i32 %sel) {
+entry:
+  %v = insertelement <7 x double> %vec, double 1.000000e+00, i32 %sel
+  store <7 x double> %v, <7 x double> addrspace(1)* %out
+  ret void
+}
+
 ; GCN-LABEL: {{^}}bit4_inselt:
 ; GCN: buffer_store_byte
 ; GCN: buffer_load_ubyte

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 942110ccc1a7..678f8613e037 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1362,7 +1362,7 @@ define amdgpu_kernel void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 add
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_cmp_lg_u32 s0, 0
 ; VI-NEXT:    s_cbranch_scc0 BB26_2
-; VI-NEXT: ; %bb.1: ; %else
+; VI-NEXT:  ; %bb.1: ; %else
 ; VI-NEXT:    s_load_dword s1, s[6:7], 0x4
 ; VI-NEXT:    s_mov_b64 s[2:3], 0
 ; VI-NEXT:    s_andn2_b64 vcc, exec, s[2:3]
@@ -1634,98 +1634,76 @@ define amdgpu_kernel void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)
 define amdgpu_kernel void @dynamic_insertelement_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, i32 %b) #0 {
 ; SI-LABEL: dynamic_insertelement_v8f64:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; SI-NEXT:    s_load_dwordx16 s[12:27], s[4:5], 0x10
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x10
 ; SI-NEXT:    s_load_dword s4, s[4:5], 0x20
-; SI-NEXT:    s_add_u32 s0, s0, s7
-; SI-NEXT:    s_addc_u32 s1, s1, 0
-; SI-NEXT:    v_mov_b32_e32 v16, 64
+; SI-NEXT:    v_mov_b32_e32 v16, 0x40200000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s12
-; SI-NEXT:    s_and_b32 s4, s4, 7
-; SI-NEXT:    s_lshl_b32 s4, s4, 3
-; SI-NEXT:    v_mov_b32_e32 v1, s13
-; SI-NEXT:    v_mov_b32_e32 v12, s24
-; SI-NEXT:    v_mov_b32_e32 v13, s25
-; SI-NEXT:    v_mov_b32_e32 v14, s26
-; SI-NEXT:    v_mov_b32_e32 v15, s27
-; SI-NEXT:    v_mov_b32_e32 v2, s14
-; SI-NEXT:    v_mov_b32_e32 v3, s15
-; SI-NEXT:    v_mov_b32_e32 v4, s16
-; SI-NEXT:    v_mov_b32_e32 v5, s17
-; SI-NEXT:    v_mov_b32_e32 v6, s18
-; SI-NEXT:    v_mov_b32_e32 v7, s19
-; SI-NEXT:    v_mov_b32_e32 v8, s20
-; SI-NEXT:    v_mov_b32_e32 v9, s21
-; SI-NEXT:    v_mov_b32_e32 v10, s22
-; SI-NEXT:    v_mov_b32_e32 v11, s23
-; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:112
-; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:96
-; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:80
-; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; SI-NEXT:    v_or_b32_e32 v16, s4, v16
-; SI-NEXT:    v_mov_b32_e32 v0, 0
-; SI-NEXT:    v_mov_b32_e32 v1, 0x40200000
-; SI-NEXT:    buffer_store_dwordx2 v[0:1], v16, s[0:3], 0 offen
-; SI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; SI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:80
-; SI-NEXT:    buffer_load_dwordx4 v[8:11], off, s[0:3], 0 offset:96
-; SI-NEXT:    buffer_load_dwordx4 v[12:15], off, s[0:3], 0 offset:112
-; SI-NEXT:    s_mov_b32 s11, 0x100f000
-; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[8:11], 0 offset:48
-; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[8:11], 0 offset:32
-; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:16
-; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    s_lshl_b32 s4, s4, 1
+; SI-NEXT:    s_mov_b32 m0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s10
+; SI-NEXT:    v_mov_b32_e32 v3, s11
+; SI-NEXT:    v_mov_b32_e32 v4, s12
+; SI-NEXT:    v_mov_b32_e32 v5, s13
+; SI-NEXT:    v_mov_b32_e32 v6, s14
+; SI-NEXT:    v_mov_b32_e32 v7, s15
+; SI-NEXT:    v_mov_b32_e32 v8, s16
+; SI-NEXT:    v_mov_b32_e32 v9, s17
+; SI-NEXT:    v_mov_b32_e32 v10, s18
+; SI-NEXT:    v_mov_b32_e32 v11, s19
+; SI-NEXT:    v_mov_b32_e32 v12, s20
+; SI-NEXT:    v_mov_b32_e32 v13, s21
+; SI-NEXT:    v_mov_b32_e32 v14, s22
+; SI-NEXT:    v_mov_b32_e32 v15, s23
+; SI-NEXT:    s_or_b32 s4, s4, 1
+; SI-NEXT:    v_movreld_b32_e32 v0, 0
+; SI-NEXT:    s_mov_b32 m0, s4
+; SI-NEXT:    v_movreld_b32_e32 v0, v16
+; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
+; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: dynamic_insertelement_v8f64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
-; VI-NEXT:    s_load_dwordx16 s[12:27], s[4:5], 0x40
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x40
 ; VI-NEXT:    s_load_dword s4, s[4:5], 0x80
-; VI-NEXT:    s_add_u32 s0, s0, s7
-; VI-NEXT:    s_addc_u32 s1, s1, 0
-; VI-NEXT:    v_mov_b32_e32 v16, 64
+; VI-NEXT:    v_mov_b32_e32 v16, 0x40200000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s12
-; VI-NEXT:    s_and_b32 s4, s4, 7
-; VI-NEXT:    s_lshl_b32 s4, s4, 3
-; VI-NEXT:    v_mov_b32_e32 v1, s13
-; VI-NEXT:    v_mov_b32_e32 v12, s24
-; VI-NEXT:    v_mov_b32_e32 v13, s25
-; VI-NEXT:    v_mov_b32_e32 v14, s26
-; VI-NEXT:    v_mov_b32_e32 v15, s27
-; VI-NEXT:    v_mov_b32_e32 v2, s14
-; VI-NEXT:    v_mov_b32_e32 v3, s15
-; VI-NEXT:    v_mov_b32_e32 v4, s16
-; VI-NEXT:    v_mov_b32_e32 v5, s17
-; VI-NEXT:    v_mov_b32_e32 v6, s18
-; VI-NEXT:    v_mov_b32_e32 v7, s19
-; VI-NEXT:    v_mov_b32_e32 v8, s20
-; VI-NEXT:    v_mov_b32_e32 v9, s21
-; VI-NEXT:    v_mov_b32_e32 v10, s22
-; VI-NEXT:    v_mov_b32_e32 v11, s23
-; VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:112
-; VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:96
-; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:80
-; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; VI-NEXT:    v_or_b32_e32 v16, s4, v16
-; VI-NEXT:    v_mov_b32_e32 v0, 0
-; VI-NEXT:    v_mov_b32_e32 v1, 0x40200000
-; VI-NEXT:    buffer_store_dwordx2 v[0:1], v16, s[0:3], 0 offen
-; VI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; VI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:80
-; VI-NEXT:    buffer_load_dwordx4 v[8:11], off, s[0:3], 0 offset:96
-; VI-NEXT:    buffer_load_dwordx4 v[12:15], off, s[0:3], 0 offset:112
-; VI-NEXT:    s_mov_b32 s11, 0x1100f000
-; VI-NEXT:    s_mov_b32 s10, -1
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[8:11], 0 offset:48
-; VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[8:11], 0 offset:32
-; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:16
-; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    s_lshl_b32 s4, s4, 1
+; VI-NEXT:    s_mov_b32 m0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_mov_b32_e32 v2, s10
+; VI-NEXT:    v_mov_b32_e32 v3, s11
+; VI-NEXT:    v_mov_b32_e32 v4, s12
+; VI-NEXT:    v_mov_b32_e32 v5, s13
+; VI-NEXT:    v_mov_b32_e32 v6, s14
+; VI-NEXT:    v_mov_b32_e32 v7, s15
+; VI-NEXT:    v_mov_b32_e32 v8, s16
+; VI-NEXT:    v_mov_b32_e32 v9, s17
+; VI-NEXT:    v_mov_b32_e32 v10, s18
+; VI-NEXT:    v_mov_b32_e32 v11, s19
+; VI-NEXT:    v_mov_b32_e32 v12, s20
+; VI-NEXT:    v_mov_b32_e32 v13, s21
+; VI-NEXT:    v_mov_b32_e32 v14, s22
+; VI-NEXT:    v_mov_b32_e32 v15, s23
+; VI-NEXT:    s_or_b32 s4, s4, 1
+; VI-NEXT:    v_movreld_b32_e32 v0, 0
+; VI-NEXT:    s_mov_b32 m0, s4
+; VI-NEXT:    v_movreld_b32_e32 v0, v16
+; VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
+; VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <8 x double> %a, double 8.0, i32 %b
   store <8 x double> %vecins, <8 x double> addrspace(1)* %out, align 16

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
index 1c5bb08957d0..3f3807b274df 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
@@ -1545,15 +1545,15 @@ define amdgpu_kernel void @simplify_bfe_u32_multi_use_arg(i32 addrspace(1)* %out
 ; SI-NEXT:    s_mov_b32 s7, s3
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
-; SI-NEXT:    s_mov_b32 s0, s10
-; SI-NEXT:    s_mov_b32 s1, s11
-; SI-NEXT:    s_mov_b32 s10, s2
-; SI-NEXT:    s_mov_b32 s11, s3
+; SI-NEXT:    s_mov_b32 s0, s8
+; SI-NEXT:    s_mov_b32 s1, s9
+; SI-NEXT:    s_mov_b32 s4, s10
+; SI-NEXT:    s_mov_b32 s5, s11
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v0, 63, v0
 ; SI-NEXT:    v_bfe_u32 v1, v0, 2, 2
-; SI-NEXT:    buffer_store_dword v1, off, s[8:11], 0
-; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: simplify_bfe_u32_multi_use_arg:
@@ -1566,15 +1566,17 @@ define amdgpu_kernel void @simplify_bfe_u32_multi_use_arg(i32 addrspace(1)* %out
 ; VI-NEXT:    s_mov_b32 s11, s3
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
+; VI-NEXT:    s_mov_b32 s0, s4
+; VI-NEXT:    s_mov_b32 s1, s5
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s6, s2
 ; VI-NEXT:    s_mov_b32 s7, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0)
 ; VI-NEXT:    v_and_b32_e32 v0, 63, v0
 ; VI-NEXT:    v_bfe_u32 v1, v0, 2, 2
-; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
-; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
                                             i32 addrspace(1)* %out1,
                                             i32 addrspace(1)* %in) #0 {

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
index 14ca8225273d..c0c6f4f4b93b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
@@ -12,20 +12,20 @@ define amdgpu_kernel void @maxnum_f16(
 ; SI-LABEL: maxnum_f16:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; SI-NEXT:    s_mov_b32 s11, 0xf000
-; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_mov_b32 s14, s10
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b32 s14, s2
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s12, s6
 ; SI-NEXT:    s_mov_b32 s13, s7
-; SI-NEXT:    s_mov_b32 s15, s11
-; SI-NEXT:    s_mov_b32 s2, s10
-; SI-NEXT:    s_mov_b32 s3, s11
+; SI-NEXT:    s_mov_b32 s15, s3
+; SI-NEXT:    s_mov_b32 s10, s2
+; SI-NEXT:    s_mov_b32 s11, s3
 ; SI-NEXT:    buffer_load_ushort v0, off, s[12:15], 0
-; SI-NEXT:    buffer_load_ushort v1, off, s[0:3], 0
-; SI-NEXT:    s_mov_b32 s8, s4
-; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    buffer_load_ushort v1, off, s[8:11], 0
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -34,7 +34,7 @@ define amdgpu_kernel void @maxnum_f16(
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_max_f32_e32 v0, v0, v1
 ; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT:    buffer_store_short v0, off, s[8:11], 0
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: maxnum_f16:

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
index c28fd3abcbad..fd3e3212a8ce 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
@@ -12,20 +12,20 @@ define amdgpu_kernel void @minnum_f16_ieee(
 ; SI-LABEL: minnum_f16_ieee:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; SI-NEXT:    s_mov_b32 s11, 0xf000
-; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_mov_b32 s14, s10
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b32 s14, s2
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s12, s6
 ; SI-NEXT:    s_mov_b32 s13, s7
-; SI-NEXT:    s_mov_b32 s15, s11
-; SI-NEXT:    s_mov_b32 s2, s10
-; SI-NEXT:    s_mov_b32 s3, s11
+; SI-NEXT:    s_mov_b32 s15, s3
+; SI-NEXT:    s_mov_b32 s10, s2
+; SI-NEXT:    s_mov_b32 s11, s3
 ; SI-NEXT:    buffer_load_ushort v0, off, s[12:15], 0
-; SI-NEXT:    buffer_load_ushort v1, off, s[0:3], 0
-; SI-NEXT:    s_mov_b32 s8, s4
-; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    buffer_load_ushort v1, off, s[8:11], 0
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
@@ -34,7 +34,7 @@ define amdgpu_kernel void @minnum_f16_ieee(
 ; SI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; SI-NEXT:    v_min_f32_e32 v0, v0, v1
 ; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT:    buffer_store_short v0, off, s[8:11], 0
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: minnum_f16_ieee:

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
index efdbde63ce19..7d0d4eee1f04 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
@@ -254,9 +254,9 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 ; SI-NEXT:    v_mov_b32_e32 v0, s17
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s19, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, s11
+; SI-NEXT:    v_mov_b32_e32 v1, s11
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s19, 51
-; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v4, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v0, s16
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v2, s10
@@ -267,6 +267,7 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
 ; SI-NEXT:    s_brev_b32 s16, -2
 ; SI-NEXT:    v_mov_b32_e32 v12, 0x3ff00000
+; SI-NEXT:    v_mov_b32_e32 v4, s11
 ; SI-NEXT:    v_bfi_b32 v4, s16, v12, v4
 ; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s17
 ; SI-NEXT:    v_cndmask_b32_e32 v3, 0, v4, vcc
@@ -278,9 +279,9 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 ; SI-NEXT:    v_mov_b32_e32 v0, s11
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s17, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v6, s9
+; SI-NEXT:    v_mov_b32_e32 v1, s9
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s17, 51
-; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v6, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
@@ -288,6 +289,7 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 ; SI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[0:1]
 ; SI-NEXT:    s_bfe_u32 s0, s15, 0xb0014
 ; SI-NEXT:    s_add_i32 s10, s0, s18
+; SI-NEXT:    v_mov_b32_e32 v6, s9
 ; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s10
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
 ; SI-NEXT:    s_andn2_b64 s[8:9], s[14:15], s[0:1]
@@ -298,15 +300,16 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 ; SI-NEXT:    v_mov_b32_e32 v4, s9
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s10, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; SI-NEXT:    v_mov_b32_e32 v10, s15
+; SI-NEXT:    v_mov_b32_e32 v5, s15
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s10, 51
-; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v10, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v6, s14
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s[0:1]
 ; SI-NEXT:    v_add_f64 v[6:7], s[14:15], -v[4:5]
 ; SI-NEXT:    s_bfe_u32 s0, s13, 0xb0014
+; SI-NEXT:    v_mov_b32_e32 v10, s15
 ; SI-NEXT:    s_add_i32 s8, s0, s18
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[6:7]|, 0.5
 ; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s8
@@ -319,19 +322,20 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 ; SI-NEXT:    v_mov_b32_e32 v5, s0
 ; SI-NEXT:    v_mov_b32_e32 v4, s3
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s8, 0
-; SI-NEXT:    v_mov_b32_e32 v13, s13
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s13
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s8, 51
-; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v13, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v4, s2
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v10, s12
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s[0:1]
 ; SI-NEXT:    v_add_f64 v[10:11], s[12:13], -v[4:5]
-; SI-NEXT:    v_bfi_b32 v12, s16, v12, v13
+; SI-NEXT:    v_mov_b32_e32 v13, s13
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[10:11]|, 0.5
-; SI-NEXT:    v_mov_b32_e32 v10, 0
+; SI-NEXT:    v_bfi_b32 v12, s16, v12, v13
 ; SI-NEXT:    v_cndmask_b32_e32 v11, 0, v12, vcc
+; SI-NEXT:    v_mov_b32_e32 v10, 0
 ; SI-NEXT:    v_mov_b32_e32 v8, 0
 ; SI-NEXT:    v_add_f64 v[4:5], v[4:5], v[10:11]
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
@@ -408,9 +412,9 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    v_mov_b32_e32 v0, s25
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s26, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, s11
+; SI-NEXT:    v_mov_b32_e32 v1, s11
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s26, 51
-; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v4, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v0, s24
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v2, s10
@@ -421,6 +425,7 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
 ; SI-NEXT:    s_brev_b32 s24, -2
 ; SI-NEXT:    v_mov_b32_e32 v18, 0x3ff00000
+; SI-NEXT:    v_mov_b32_e32 v4, s11
 ; SI-NEXT:    v_bfi_b32 v4, s24, v18, v4
 ; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s25
 ; SI-NEXT:    v_cndmask_b32_e32 v3, 0, v4, vcc
@@ -432,15 +437,16 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    v_mov_b32_e32 v0, s11
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s25, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v6, s9
+; SI-NEXT:    v_mov_b32_e32 v1, s9
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s25, 51
-; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v6, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[2:3]
 ; SI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[0:1]
 ; SI-NEXT:    s_bfe_u32 s2, s15, 0xb0014
+; SI-NEXT:    v_mov_b32_e32 v6, s9
 ; SI-NEXT:    s_add_i32 s10, s2, s7
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
 ; SI-NEXT:    v_bfi_b32 v6, s24, v18, v6
@@ -454,15 +460,16 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    v_mov_b32_e32 v4, s9
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s10, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; SI-NEXT:    v_mov_b32_e32 v8, s15
+; SI-NEXT:    v_mov_b32_e32 v5, s15
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s10, 51
-; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v8, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v6, s14
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s[2:3]
 ; SI-NEXT:    v_add_f64 v[6:7], s[14:15], -v[4:5]
 ; SI-NEXT:    s_bfe_u32 s2, s13, 0xb0014
+; SI-NEXT:    v_mov_b32_e32 v8, s15
 ; SI-NEXT:    s_add_i32 s10, s2, s7
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[6:7]|, 0.5
 ; SI-NEXT:    v_bfi_b32 v8, s24, v18, v8
@@ -476,15 +483,16 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    v_mov_b32_e32 v4, s9
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s10, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; SI-NEXT:    v_mov_b32_e32 v10, s13
+; SI-NEXT:    v_mov_b32_e32 v5, s13
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s10, 51
-; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v10, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v8, s12
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, v8, s[2:3]
 ; SI-NEXT:    v_add_f64 v[8:9], s[12:13], -v[4:5]
 ; SI-NEXT:    s_bfe_u32 s2, s19, 0xb0014
+; SI-NEXT:    v_mov_b32_e32 v10, s13
 ; SI-NEXT:    s_add_i32 s10, s2, s7
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[8:9]|, 0.5
 ; SI-NEXT:    v_bfi_b32 v10, s24, v18, v10
@@ -498,9 +506,9 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    v_mov_b32_e32 v8, s9
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s10, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
-; SI-NEXT:    v_mov_b32_e32 v19, s19
+; SI-NEXT:    v_mov_b32_e32 v9, s19
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s10, 51
-; SI-NEXT:    v_cndmask_b32_e64 v13, v8, v19, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v13, v8, v9, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v8, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v8, v8, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v9, s18
@@ -512,15 +520,17 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    s_bfe_u32 s2, s23, 0xb0014
 ; SI-NEXT:    s_add_i32 s14, s2, s7
 ; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s14
+; SI-NEXT:    v_mov_b32_e32 v8, s19
 ; SI-NEXT:    s_andn2_b64 s[10:11], s[22:23], s[2:3]
 ; SI-NEXT:    s_and_b32 s2, s23, s27
+; SI-NEXT:    v_bfi_b32 v19, s24, v18, v8
 ; SI-NEXT:    v_mov_b32_e32 v9, s2
 ; SI-NEXT:    v_mov_b32_e32 v8, s11
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s14, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
-; SI-NEXT:    v_mov_b32_e32 v16, s23
+; SI-NEXT:    v_mov_b32_e32 v9, s23
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s14, 51
-; SI-NEXT:    v_cndmask_b32_e64 v9, v8, v16, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v9, v8, v9, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v8, s10
 ; SI-NEXT:    v_cndmask_b32_e64 v8, v8, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v10, s22
@@ -535,22 +545,24 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s7, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v10, v10, v11, vcc
 ; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s7, 51
-; SI-NEXT:    v_mov_b32_e32 v17, s21
-; SI-NEXT:    v_cndmask_b32_e64 v15, v10, v17, s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v11, s21
+; SI-NEXT:    v_cndmask_b32_e64 v15, v10, v11, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v10, s4
 ; SI-NEXT:    v_cndmask_b32_e64 v10, v10, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v11, s20
 ; SI-NEXT:    v_cndmask_b32_e64 v14, v10, v11, s[2:3]
 ; SI-NEXT:    v_add_f64 v[10:11], s[20:21], -v[14:15]
-; SI-NEXT:    v_bfi_b32 v16, s24, v18, v16
+; SI-NEXT:    v_mov_b32_e32 v17, s23
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[10:11]|, 0.5
 ; SI-NEXT:    v_add_f64 v[10:11], s[22:23], -v[8:9]
-; SI-NEXT:    v_bfi_b32 v17, s24, v18, v17
+; SI-NEXT:    v_mov_b32_e32 v16, s21
 ; SI-NEXT:    v_cmp_ge_f64_e64 s[2:3], |v[10:11]|, 0.5
+; SI-NEXT:    v_bfi_b32 v17, s24, v18, v17
+; SI-NEXT:    v_cndmask_b32_e64 v11, 0, v17, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v10, 0
-; SI-NEXT:    v_cndmask_b32_e64 v11, 0, v16, s[2:3]
+; SI-NEXT:    v_bfi_b32 v16, s24, v18, v16
 ; SI-NEXT:    v_add_f64 v[10:11], v[8:9], v[10:11]
-; SI-NEXT:    v_cndmask_b32_e32 v9, 0, v17, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v9, 0, v16, vcc
 ; SI-NEXT:    v_mov_b32_e32 v8, 0
 ; SI-NEXT:    s_and_b32 s13, s17, s27
 ; SI-NEXT:    v_add_f64 v[8:9], v[14:15], v[8:9]
@@ -563,10 +575,10 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    v_cndmask_b32_e64 v17, v14, v15, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v14, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v14, v14, 0, vcc
-; SI-NEXT:    v_mov_b32_e32 v16, s16
-; SI-NEXT:    v_cndmask_b32_e64 v16, v14, v16, s[2:3]
-; SI-NEXT:    v_bfi_b32 v19, s24, v18, v19
-; SI-NEXT:    v_bfi_b32 v18, s24, v18, v15
+; SI-NEXT:    v_mov_b32_e32 v15, s16
+; SI-NEXT:    v_cndmask_b32_e64 v16, v14, v15, s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v14, s17
+; SI-NEXT:    v_bfi_b32 v18, s24, v18, v14
 ; SI-NEXT:    v_add_f64 v[14:15], s[16:17], -v[16:17]
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[14:15]|, 0.5

diff --git a/llvm/test/CodeGen/AMDGPU/saddo.ll b/llvm/test/CodeGen/AMDGPU/saddo.ll
index 4dc49963203d..f0a7a8067081 100644
--- a/llvm/test/CodeGen/AMDGPU/saddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/saddo.ll
@@ -160,29 +160,33 @@ define amdgpu_kernel void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
 define amdgpu_kernel void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
 ; SI-LABEL: v_saddo_i32:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x9
-; SI-NEXT:    s_mov_b32 s15, 0xf000
-; SI-NEXT:    s_mov_b32 s14, -1
-; SI-NEXT:    s_mov_b32 s2, s14
-; SI-NEXT:    s_mov_b32 s3, s15
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s14, s10
+; SI-NEXT:    s_mov_b32 s15, s11
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s0, s10
-; SI-NEXT:    s_mov_b32 s1, s11
-; SI-NEXT:    s_mov_b32 s10, s14
-; SI-NEXT:    s_mov_b32 s11, s15
-; SI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
-; SI-NEXT:    buffer_load_dword v1, off, s[0:3], 0
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
-; SI-NEXT:    s_mov_b32 s6, s14
-; SI-NEXT:    s_mov_b32 s7, s15
+; SI-NEXT:    s_mov_b32 s8, s0
+; SI-NEXT:    s_mov_b32 s9, s1
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s13, s3
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_mov_b32 s2, s10
+; SI-NEXT:    s_mov_b32 s3, s11
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s6, s10
+; SI-NEXT:    s_mov_b32 s7, s11
+; SI-NEXT:    buffer_load_dword v0, off, s[0:3], 0
+; SI-NEXT:    buffer_load_dword v1, off, s[4:7], 0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
 ; SI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v1
 ; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], v2, v0
 ; SI-NEXT:    s_xor_b64 s[0:1], vcc, s[0:1]
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT:    buffer_store_dword v2, off, s[4:7], 0
+; SI-NEXT:    buffer_store_dword v2, off, s[8:11], 0
 ; SI-NEXT:    buffer_store_byte v0, off, s[12:15], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -190,48 +194,48 @@ define amdgpu_kernel void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v4, s6
-; VI-NEXT:    v_mov_b32_e32 v6, s4
-; VI-NEXT:    v_mov_b32_e32 v7, s5
-; VI-NEXT:    v_mov_b32_e32 v5, s7
-; VI-NEXT:    flat_load_dword v6, v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v7, s7
 ; VI-NEXT:    flat_load_dword v4, v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v2, s0
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_mov_b32_e32 v0, s2
-; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    flat_load_dword v5, v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v5, vcc, v4, v6
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v4
-; VI-NEXT:    v_cmp_lt_i32_e64 s[0:1], v5, v6
+; VI-NEXT:    v_add_u32_e32 v6, vcc, v5, v4
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v5
+; VI-NEXT:    v_cmp_lt_i32_e64 s[0:1], v6, v4
 ; VI-NEXT:    s_xor_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    flat_store_dword v[2:3], v5
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    flat_store_dword v[0:1], v6
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: v_saddo_i32:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v4, s6
-; GFX9-NEXT:    v_mov_b32_e32 v6, s4
-; GFX9-NEXT:    v_mov_b32_e32 v7, s5
-; GFX9-NEXT:    v_mov_b32_e32 v5, s7
-; GFX9-NEXT:    global_load_dword v6, v[6:7], off
+; GFX9-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9-NEXT:    v_mov_b32_e32 v5, s5
+; GFX9-NEXT:    v_mov_b32_e32 v6, s6
+; GFX9-NEXT:    v_mov_b32_e32 v7, s7
 ; GFX9-NEXT:    global_load_dword v4, v[4:5], off
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    global_load_dword v5, v[6:7], off
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_add_u32_e32 v5, v6, v4
-; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v4
-; GFX9-NEXT:    v_cmp_lt_i32_e64 s[0:1], v5, v6
+; GFX9-NEXT:    v_add_u32_e32 v6, v4, v5
+; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v5
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[0:1], v6, v4
 ; GFX9-NEXT:    s_xor_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    global_store_dword v[2:3], v5, off
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; GFX9-NEXT:    global_store_byte v[0:1], v2, off
+; GFX9-NEXT:    global_store_dword v[0:1], v6, off
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_byte v[2:3], v0, off
 ; GFX9-NEXT:    s_endpgm
   %a = load i32, i32 addrspace(1)* %aptr, align 4
   %b = load i32, i32 addrspace(1)* %bptr, align 4
@@ -255,62 +259,64 @@ define amdgpu_kernel void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
 ; SI-NEXT:    s_addc_u32 s13, s5, s7
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
 ; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[12:13], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v0, s12
 ; SI-NEXT:    v_cmp_lt_i64_e64 s[4:5], s[6:7], 0
-; SI-NEXT:    s_mov_b32 s8, s2
-; SI-NEXT:    s_mov_b32 s9, s3
+; SI-NEXT:    v_mov_b32_e32 v0, s12
+; SI-NEXT:    s_mov_b32 s8, s0
+; SI-NEXT:    s_mov_b32 s9, s1
+; SI-NEXT:    v_mov_b32_e32 v1, s13
+; SI-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; SI-NEXT:    s_mov_b32 s0, s2
+; SI-NEXT:    s_mov_b32 s1, s3
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
 ; SI-NEXT:    s_mov_b32 s2, s10
 ; SI-NEXT:    s_mov_b32 s3, s11
-; SI-NEXT:    v_mov_b32_e32 v1, s13
-; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
-; SI-NEXT:    s_xor_b64 s[0:1], s[4:5], vcc
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT:    buffer_store_byte v0, off, s[8:11], 0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: s_saddo_i64:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
 ; VI-NEXT:    v_mov_b32_e32 v4, s4
 ; VI-NEXT:    s_add_u32 s0, s4, s6
-; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    s_addc_u32 s1, s5, s7
 ; VI-NEXT:    v_mov_b32_e32 v5, s5
 ; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v0, s2
-; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
 ; VI-NEXT:    v_mov_b32_e32 v5, s1
 ; VI-NEXT:    v_mov_b32_e32 v4, s0
 ; VI-NEXT:    s_xor_b64 s[0:1], s[2:3], vcc
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: s_saddo_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s4
 ; GFX9-NEXT:    s_add_u32 s0, s4, s6
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-NEXT:    s_addc_u32 s1, s5, s7
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s5
 ; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[0:1], v[4:5]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX9-NEXT:    v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX9-NEXT:    s_xor_b64 s[0:1], s[2:3], vcc
-; GFX9-NEXT:    global_store_dwordx2 v[2:3], v[4:5], off
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; GFX9-NEXT:    global_store_byte v[0:1], v2, off
+; GFX9-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_byte v[2:3], v0, off
 ; GFX9-NEXT:    s_endpgm
   %sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
   %val = extractvalue { i64, i1 } %sadd, 0
@@ -323,28 +329,32 @@ define amdgpu_kernel void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
 define amdgpu_kernel void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
 ; SI-LABEL: v_saddo_i64:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x9
-; SI-NEXT:    s_mov_b32 s15, 0xf000
-; SI-NEXT:    s_mov_b32 s14, -1
-; SI-NEXT:    s_mov_b32 s2, s14
-; SI-NEXT:    s_mov_b32 s3, s15
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s14, s10
+; SI-NEXT:    s_mov_b32 s15, s11
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s0, s10
-; SI-NEXT:    s_mov_b32 s1, s11
-; SI-NEXT:    s_mov_b32 s10, s14
-; SI-NEXT:    s_mov_b32 s11, s15
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; SI-NEXT:    buffer_load_dwordx2 v[2:3], off, s[0:3], 0
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
-; SI-NEXT:    s_mov_b32 s6, s14
-; SI-NEXT:    s_mov_b32 s7, s15
+; SI-NEXT:    s_mov_b32 s8, s0
+; SI-NEXT:    s_mov_b32 s9, s1
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s13, s3
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_mov_b32 s2, s10
+; SI-NEXT:    s_mov_b32 s3, s11
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s6, s10
+; SI-NEXT:    s_mov_b32 s7, s11
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], off, s[4:7], 0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, v0, v2
 ; SI-NEXT:    v_addc_u32_e32 v5, vcc, v1, v3, vcc
 ; SI-NEXT:    v_cmp_gt_i64_e32 vcc, 0, v[2:3]
 ; SI-NEXT:    v_cmp_lt_i64_e64 s[0:1], v[4:5], v[0:1]
-; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[8:11], 0
 ; SI-NEXT:    s_xor_b64 s[0:1], vcc, s[0:1]
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; SI-NEXT:    buffer_store_byte v0, off, s[12:15], 0
@@ -354,50 +364,50 @@ define amdgpu_kernel void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v4, s6
-; VI-NEXT:    v_mov_b32_e32 v5, s7
-; VI-NEXT:    v_mov_b32_e32 v6, s4
-; VI-NEXT:    v_mov_b32_e32 v7, s5
-; VI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v7, s7
 ; VI-NEXT:    flat_load_dwordx2 v[4:5], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v2, s0
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_mov_b32_e32 v0, s2
-; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v8, vcc, v6, v4
-; VI-NEXT:    v_addc_u32_e32 v9, vcc, v7, v5, vcc
-; VI-NEXT:    v_cmp_gt_i64_e32 vcc, 0, v[4:5]
-; VI-NEXT:    v_cmp_lt_i64_e64 s[0:1], v[8:9], v[6:7]
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[8:9]
+; VI-NEXT:    v_add_u32_e32 v8, vcc, v4, v6
+; VI-NEXT:    v_addc_u32_e32 v9, vcc, v5, v7, vcc
+; VI-NEXT:    v_cmp_gt_i64_e32 vcc, 0, v[6:7]
+; VI-NEXT:    v_cmp_lt_i64_e64 s[0:1], v[8:9], v[4:5]
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[8:9]
 ; VI-NEXT:    s_xor_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: v_saddo_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v4, s6
-; GFX9-NEXT:    v_mov_b32_e32 v5, s7
-; GFX9-NEXT:    v_mov_b32_e32 v6, s4
-; GFX9-NEXT:    v_mov_b32_e32 v7, s5
-; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[6:7], off
+; GFX9-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9-NEXT:    v_mov_b32_e32 v5, s5
+; GFX9-NEXT:    v_mov_b32_e32 v6, s6
+; GFX9-NEXT:    v_mov_b32_e32 v7, s7
 ; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[4:5], off
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[6:7], off
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v6, v4
-; GFX9-NEXT:    v_addc_co_u32_e32 v9, vcc, v7, v5, vcc
-; GFX9-NEXT:    v_cmp_gt_i64_e32 vcc, 0, v[4:5]
-; GFX9-NEXT:    v_cmp_lt_i64_e64 s[0:1], v[8:9], v[6:7]
-; GFX9-NEXT:    global_store_dwordx2 v[2:3], v[8:9], off
+; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v4, v6
+; GFX9-NEXT:    v_addc_co_u32_e32 v9, vcc, v5, v7, vcc
+; GFX9-NEXT:    v_cmp_gt_i64_e32 vcc, 0, v[6:7]
+; GFX9-NEXT:    v_cmp_lt_i64_e64 s[0:1], v[8:9], v[4:5]
+; GFX9-NEXT:    global_store_dwordx2 v[0:1], v[8:9], off
 ; GFX9-NEXT:    s_xor_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; GFX9-NEXT:    global_store_byte v[0:1], v2, off
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_byte v[2:3], v0, off
 ; GFX9-NEXT:    s_endpgm
   %a = load i64, i64 addrspace(1)* %aptr, align 4
   %b = load i64, i64 addrspace(1)* %bptr, align 4
@@ -412,22 +422,26 @@ define amdgpu_kernel void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
 define amdgpu_kernel void @v_saddo_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %carryout, <2 x i32> addrspace(1)* %aptr, <2 x i32> addrspace(1)* %bptr) nounwind {
 ; SI-LABEL: v_saddo_v2i32:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[8:15], s[0:1], 0x9
-; SI-NEXT:    s_mov_b32 s19, 0xf000
-; SI-NEXT:    s_mov_b32 s18, -1
-; SI-NEXT:    s_mov_b32 s2, s18
-; SI-NEXT:    s_mov_b32 s3, s19
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s14, s10
+; SI-NEXT:    s_mov_b32 s15, s11
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s0, s14
-; SI-NEXT:    s_mov_b32 s1, s15
-; SI-NEXT:    s_mov_b32 s14, s18
-; SI-NEXT:    s_mov_b32 s15, s19
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[12:15], 0
-; SI-NEXT:    buffer_load_dwordx2 v[2:3], off, s[0:3], 0
-; SI-NEXT:    s_mov_b32 s16, s10
-; SI-NEXT:    s_mov_b32 s17, s11
-; SI-NEXT:    s_mov_b32 s10, s18
-; SI-NEXT:    s_mov_b32 s11, s19
+; SI-NEXT:    s_mov_b32 s8, s0
+; SI-NEXT:    s_mov_b32 s9, s1
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s13, s3
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_mov_b32 s2, s10
+; SI-NEXT:    s_mov_b32 s3, s11
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s6, s10
+; SI-NEXT:    s_mov_b32 s7, s11
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], off, s[4:7], 0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_add_i32_e32 v5, vcc, v1, v3
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, v0, v2
@@ -440,65 +454,65 @@ define amdgpu_kernel void @v_saddo_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32>
 ; SI-NEXT:    s_xor_b64 s[0:1], vcc, s[2:3]
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[8:11], 0
-; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[16:19], 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[12:15], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_saddo_v2i32:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v4, s6
-; VI-NEXT:    v_mov_b32_e32 v5, s7
-; VI-NEXT:    v_mov_b32_e32 v6, s4
-; VI-NEXT:    v_mov_b32_e32 v7, s5
-; VI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v7, s7
 ; VI-NEXT:    flat_load_dwordx2 v[4:5], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v2, s0
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_mov_b32_e32 v0, s2
-; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v9, vcc, v7, v5
-; VI-NEXT:    v_add_u32_e32 v8, vcc, v6, v4
-; VI-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v5
-; VI-NEXT:    v_cmp_lt_i32_e64 s[4:5], v9, v7
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v4
-; VI-NEXT:    v_cmp_lt_i32_e64 s[2:3], v8, v6
+; VI-NEXT:    v_add_u32_e32 v9, vcc, v5, v7
+; VI-NEXT:    v_add_u32_e32 v8, vcc, v4, v6
+; VI-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v7
+; VI-NEXT:    v_cmp_lt_i32_e64 s[4:5], v9, v5
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v6
+; VI-NEXT:    v_cmp_lt_i32_e64 s[2:3], v8, v4
 ; VI-NEXT:    s_xor_b64 s[0:1], s[0:1], s[4:5]
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[8:9]
-; VI-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[8:9]
+; VI-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[0:1]
 ; VI-NEXT:    s_xor_b64 s[0:1], vcc, s[2:3]
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: v_saddo_v2i32:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v4, s6
-; GFX9-NEXT:    v_mov_b32_e32 v5, s7
-; GFX9-NEXT:    v_mov_b32_e32 v6, s4
-; GFX9-NEXT:    v_mov_b32_e32 v7, s5
-; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[6:7], off
+; GFX9-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9-NEXT:    v_mov_b32_e32 v5, s5
+; GFX9-NEXT:    v_mov_b32_e32 v6, s6
+; GFX9-NEXT:    v_mov_b32_e32 v7, s7
 ; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[4:5], off
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[6:7], off
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_add_u32_e32 v9, v7, v5
-; GFX9-NEXT:    v_add_u32_e32 v8, v6, v4
-; GFX9-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v5
-; GFX9-NEXT:    v_cmp_lt_i32_e64 s[4:5], v9, v7
-; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v4
-; GFX9-NEXT:    v_cmp_lt_i32_e64 s[2:3], v8, v6
+; GFX9-NEXT:    v_add_u32_e32 v9, v5, v7
+; GFX9-NEXT:    v_add_u32_e32 v8, v4, v6
+; GFX9-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v7
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[4:5], v9, v5
+; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v6
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[2:3], v8, v4
 ; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], s[4:5]
-; GFX9-NEXT:    global_store_dwordx2 v[2:3], v[8:9], off
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v[0:1], v[8:9], off
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[0:1]
 ; GFX9-NEXT:    s_xor_b64 s[0:1], vcc, s[2:3]
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; GFX9-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
 ; GFX9-NEXT:    s_endpgm
   %a = load <2 x i32>, <2 x i32> addrspace(1)* %aptr, align 4
   %b = load <2 x i32>, <2 x i32> addrspace(1)* %bptr, align 4

diff --git a/llvm/test/CodeGen/AMDGPU/select.f16.ll b/llvm/test/CodeGen/AMDGPU/select.f16.ll
index f023f7a7c8b1..5f4c49d97097 100644
--- a/llvm/test/CodeGen/AMDGPU/select.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/select.f16.ll
@@ -6,28 +6,30 @@ define amdgpu_kernel void @select_f16(
 ; SI-LABEL: select_f16:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x11
-; SI-NEXT:    s_mov_b32 s15, 0xf000
-; SI-NEXT:    s_mov_b32 s14, -1
-; SI-NEXT:    s_mov_b32 s22, s14
+; SI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x11
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b32 s18, s2
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s20, s6
-; SI-NEXT:    s_mov_b32 s21, s7
-; SI-NEXT:    s_mov_b32 s23, s15
-; SI-NEXT:    s_mov_b32 s16, s10
-; SI-NEXT:    s_mov_b32 s17, s11
-; SI-NEXT:    s_mov_b32 s2, s14
-; SI-NEXT:    s_mov_b32 s3, s15
-; SI-NEXT:    s_mov_b32 s18, s14
-; SI-NEXT:    s_mov_b32 s19, s15
-; SI-NEXT:    s_mov_b32 s10, s14
-; SI-NEXT:    s_mov_b32 s11, s15
-; SI-NEXT:    buffer_load_ushort v0, off, s[20:23], 0
-; SI-NEXT:    buffer_load_ushort v1, off, s[8:11], 0
-; SI-NEXT:    buffer_load_ushort v2, off, s[16:19], 0
-; SI-NEXT:    buffer_load_ushort v3, off, s[0:3], 0
-; SI-NEXT:    s_mov_b32 s12, s4
-; SI-NEXT:    s_mov_b32 s13, s5
+; SI-NEXT:    s_mov_b32 s16, s6
+; SI-NEXT:    s_mov_b32 s17, s7
+; SI-NEXT:    s_mov_b32 s19, s3
+; SI-NEXT:    s_mov_b32 s20, s8
+; SI-NEXT:    s_mov_b32 s21, s9
+; SI-NEXT:    s_mov_b32 s8, s10
+; SI-NEXT:    s_mov_b32 s9, s11
+; SI-NEXT:    s_mov_b32 s22, s2
+; SI-NEXT:    s_mov_b32 s23, s3
+; SI-NEXT:    s_mov_b32 s10, s2
+; SI-NEXT:    s_mov_b32 s11, s3
+; SI-NEXT:    s_mov_b32 s14, s2
+; SI-NEXT:    s_mov_b32 s15, s3
+; SI-NEXT:    buffer_load_ushort v0, off, s[16:19], 0
+; SI-NEXT:    buffer_load_ushort v1, off, s[20:23], 0
+; SI-NEXT:    buffer_load_ushort v2, off, s[8:11], 0
+; SI-NEXT:    buffer_load_ushort v3, off, s[12:15], 0
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
 ; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; SI-NEXT:    s_waitcnt vmcnt(2)
@@ -39,7 +41,7 @@ define amdgpu_kernel void @select_f16(
 ; SI-NEXT:    v_cmp_lt_f32_e32 vcc, v0, v1
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
 ; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT:    buffer_store_short v0, off, s[12:15], 0
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: select_f16:
@@ -48,7 +50,7 @@ define amdgpu_kernel void @select_f16(
 ; VI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x44
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s14, s2
+; VI-NEXT:    s_mov_b32 s18, s2
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_mov_b32 s0, s4
 ; VI-NEXT:    s_mov_b32 s1, s5
@@ -56,16 +58,18 @@ define amdgpu_kernel void @select_f16(
 ; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s6, s2
 ; VI-NEXT:    s_mov_b32 s7, s3
-; VI-NEXT:    s_mov_b32 s16, s10
-; VI-NEXT:    s_mov_b32 s17, s11
-; VI-NEXT:    s_mov_b32 s15, s3
-; VI-NEXT:    s_mov_b32 s18, s2
+; VI-NEXT:    s_mov_b32 s16, s8
+; VI-NEXT:    s_mov_b32 s17, s9
+; VI-NEXT:    s_mov_b32 s8, s10
+; VI-NEXT:    s_mov_b32 s9, s11
 ; VI-NEXT:    s_mov_b32 s19, s3
 ; VI-NEXT:    s_mov_b32 s10, s2
 ; VI-NEXT:    s_mov_b32 s11, s3
+; VI-NEXT:    s_mov_b32 s14, s2
+; VI-NEXT:    s_mov_b32 s15, s3
 ; VI-NEXT:    buffer_load_ushort v0, off, s[4:7], 0
-; VI-NEXT:    buffer_load_ushort v1, off, s[8:11], 0
-; VI-NEXT:    buffer_load_ushort v2, off, s[16:19], 0
+; VI-NEXT:    buffer_load_ushort v1, off, s[16:19], 0
+; VI-NEXT:    buffer_load_ushort v2, off, s[8:11], 0
 ; VI-NEXT:    buffer_load_ushort v3, off, s[12:15], 0
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_cmp_lt_f16_e32 vcc, v0, v1
@@ -95,20 +99,22 @@ define amdgpu_kernel void @select_f16_imm_a(
 ; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_mov_b32 s18, s10
-; SI-NEXT:    s_mov_b32 s19, s11
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s16, s2
-; SI-NEXT:    s_mov_b32 s17, s3
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
 ; SI-NEXT:    s_mov_b32 s14, s10
 ; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s13, s3
+; SI-NEXT:    s_mov_b32 s16, s4
+; SI-NEXT:    s_mov_b32 s17, s5
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s18, s10
+; SI-NEXT:    s_mov_b32 s19, s11
 ; SI-NEXT:    s_mov_b32 s6, s10
 ; SI-NEXT:    s_mov_b32 s7, s11
-; SI-NEXT:    buffer_load_ushort v0, off, s[16:19], 0
-; SI-NEXT:    buffer_load_ushort v1, off, s[4:7], 0
-; SI-NEXT:    buffer_load_ushort v2, off, s[12:15], 0
+; SI-NEXT:    buffer_load_ushort v0, off, s[12:15], 0
+; SI-NEXT:    buffer_load_ushort v1, off, s[16:19], 0
+; SI-NEXT:    buffer_load_ushort v2, off, s[4:7], 0
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
 ; SI-NEXT:    s_waitcnt vmcnt(2)
@@ -137,13 +143,15 @@ define amdgpu_kernel void @select_f16_imm_a(
 ; VI-NEXT:    s_mov_b32 s1, s3
 ; VI-NEXT:    s_mov_b32 s2, s10
 ; VI-NEXT:    s_mov_b32 s3, s11
-; VI-NEXT:    s_mov_b32 s12, s6
-; VI-NEXT:    s_mov_b32 s13, s7
+; VI-NEXT:    s_mov_b32 s12, s4
+; VI-NEXT:    s_mov_b32 s13, s5
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s6, s10
 ; VI-NEXT:    s_mov_b32 s7, s11
 ; VI-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
-; VI-NEXT:    buffer_load_ushort v1, off, s[4:7], 0
-; VI-NEXT:    buffer_load_ushort v2, off, s[12:15], 0
+; VI-NEXT:    buffer_load_ushort v1, off, s[12:15], 0
+; VI-NEXT:    buffer_load_ushort v2, off, s[4:7], 0
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_cmp_lt_f16_e32 vcc, 0.5, v0
 ; VI-NEXT:    s_waitcnt vmcnt(0)
@@ -170,20 +178,22 @@ define amdgpu_kernel void @select_f16_imm_b(
 ; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_mov_b32 s18, s10
-; SI-NEXT:    s_mov_b32 s19, s11
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s16, s2
-; SI-NEXT:    s_mov_b32 s17, s3
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
 ; SI-NEXT:    s_mov_b32 s14, s10
 ; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s13, s3
+; SI-NEXT:    s_mov_b32 s16, s4
+; SI-NEXT:    s_mov_b32 s17, s5
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s18, s10
+; SI-NEXT:    s_mov_b32 s19, s11
 ; SI-NEXT:    s_mov_b32 s6, s10
 ; SI-NEXT:    s_mov_b32 s7, s11
-; SI-NEXT:    buffer_load_ushort v0, off, s[16:19], 0
-; SI-NEXT:    buffer_load_ushort v1, off, s[4:7], 0
-; SI-NEXT:    buffer_load_ushort v2, off, s[12:15], 0
+; SI-NEXT:    buffer_load_ushort v0, off, s[12:15], 0
+; SI-NEXT:    buffer_load_ushort v1, off, s[16:19], 0
+; SI-NEXT:    buffer_load_ushort v2, off, s[4:7], 0
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
 ; SI-NEXT:    s_waitcnt vmcnt(2)
@@ -212,13 +222,15 @@ define amdgpu_kernel void @select_f16_imm_b(
 ; VI-NEXT:    s_mov_b32 s1, s3
 ; VI-NEXT:    s_mov_b32 s2, s10
 ; VI-NEXT:    s_mov_b32 s3, s11
-; VI-NEXT:    s_mov_b32 s12, s6
-; VI-NEXT:    s_mov_b32 s13, s7
+; VI-NEXT:    s_mov_b32 s12, s4
+; VI-NEXT:    s_mov_b32 s13, s5
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s6, s10
 ; VI-NEXT:    s_mov_b32 s7, s11
 ; VI-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
-; VI-NEXT:    buffer_load_ushort v1, off, s[4:7], 0
-; VI-NEXT:    buffer_load_ushort v2, off, s[12:15], 0
+; VI-NEXT:    buffer_load_ushort v1, off, s[12:15], 0
+; VI-NEXT:    buffer_load_ushort v2, off, s[4:7], 0
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_cmp_gt_f16_e32 vcc, 0.5, v0
 ; VI-NEXT:    s_waitcnt vmcnt(0)
@@ -245,20 +257,22 @@ define amdgpu_kernel void @select_f16_imm_c(
 ; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_mov_b32 s18, s10
-; SI-NEXT:    s_mov_b32 s19, s11
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s16, s2
-; SI-NEXT:    s_mov_b32 s17, s3
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
 ; SI-NEXT:    s_mov_b32 s14, s10
 ; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s13, s3
+; SI-NEXT:    s_mov_b32 s16, s4
+; SI-NEXT:    s_mov_b32 s17, s5
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s18, s10
+; SI-NEXT:    s_mov_b32 s19, s11
 ; SI-NEXT:    s_mov_b32 s6, s10
 ; SI-NEXT:    s_mov_b32 s7, s11
-; SI-NEXT:    buffer_load_ushort v0, off, s[16:19], 0
-; SI-NEXT:    buffer_load_ushort v1, off, s[4:7], 0
-; SI-NEXT:    buffer_load_ushort v2, off, s[12:15], 0
+; SI-NEXT:    buffer_load_ushort v0, off, s[12:15], 0
+; SI-NEXT:    buffer_load_ushort v1, off, s[16:19], 0
+; SI-NEXT:    buffer_load_ushort v2, off, s[4:7], 0
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
 ; SI-NEXT:    s_waitcnt vmcnt(2)
@@ -287,13 +301,15 @@ define amdgpu_kernel void @select_f16_imm_c(
 ; VI-NEXT:    s_mov_b32 s1, s3
 ; VI-NEXT:    s_mov_b32 s2, s10
 ; VI-NEXT:    s_mov_b32 s3, s11
-; VI-NEXT:    s_mov_b32 s12, s6
-; VI-NEXT:    s_mov_b32 s13, s7
+; VI-NEXT:    s_mov_b32 s12, s4
+; VI-NEXT:    s_mov_b32 s13, s5
+; VI-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
+; VI-NEXT:    buffer_load_ushort v1, off, s[12:15], 0
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s6, s10
 ; VI-NEXT:    s_mov_b32 s7, s11
-; VI-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
-; VI-NEXT:    buffer_load_ushort v1, off, s[4:7], 0
-; VI-NEXT:    buffer_load_ushort v3, off, s[12:15], 0
+; VI-NEXT:    buffer_load_ushort v3, off, s[4:7], 0
 ; VI-NEXT:    v_mov_b32_e32 v2, 0x3800
 ; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_cmp_nlt_f16_e32 vcc, v0, v1
@@ -321,20 +337,22 @@ define amdgpu_kernel void @select_f16_imm_d(
 ; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_mov_b32 s18, s10
-; SI-NEXT:    s_mov_b32 s19, s11
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s16, s2
-; SI-NEXT:    s_mov_b32 s17, s3
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
 ; SI-NEXT:    s_mov_b32 s14, s10
 ; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s13, s3
+; SI-NEXT:    s_mov_b32 s16, s4
+; SI-NEXT:    s_mov_b32 s17, s5
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s18, s10
+; SI-NEXT:    s_mov_b32 s19, s11
 ; SI-NEXT:    s_mov_b32 s6, s10
 ; SI-NEXT:    s_mov_b32 s7, s11
-; SI-NEXT:    buffer_load_ushort v0, off, s[16:19], 0
-; SI-NEXT:    buffer_load_ushort v1, off, s[4:7], 0
-; SI-NEXT:    buffer_load_ushort v2, off, s[12:15], 0
+; SI-NEXT:    buffer_load_ushort v0, off, s[12:15], 0
+; SI-NEXT:    buffer_load_ushort v1, off, s[16:19], 0
+; SI-NEXT:    buffer_load_ushort v2, off, s[4:7], 0
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
 ; SI-NEXT:    s_waitcnt vmcnt(2)
@@ -363,13 +381,15 @@ define amdgpu_kernel void @select_f16_imm_d(
 ; VI-NEXT:    s_mov_b32 s1, s3
 ; VI-NEXT:    s_mov_b32 s2, s10
 ; VI-NEXT:    s_mov_b32 s3, s11
-; VI-NEXT:    s_mov_b32 s12, s6
-; VI-NEXT:    s_mov_b32 s13, s7
+; VI-NEXT:    s_mov_b32 s12, s4
+; VI-NEXT:    s_mov_b32 s13, s5
+; VI-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
+; VI-NEXT:    buffer_load_ushort v1, off, s[12:15], 0
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s6, s10
 ; VI-NEXT:    s_mov_b32 s7, s11
-; VI-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
-; VI-NEXT:    buffer_load_ushort v1, off, s[4:7], 0
-; VI-NEXT:    buffer_load_ushort v3, off, s[12:15], 0
+; VI-NEXT:    buffer_load_ushort v3, off, s[4:7], 0
 ; VI-NEXT:    v_mov_b32_e32 v2, 0x3800
 ; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_cmp_lt_f16_e32 vcc, v0, v1
@@ -398,23 +418,25 @@ define amdgpu_kernel void @select_v2f16(
 ; SI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x11
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s22, s2
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s20, s6
-; SI-NEXT:    s_mov_b32 s21, s7
-; SI-NEXT:    s_mov_b32 s23, s3
-; SI-NEXT:    s_mov_b32 s16, s10
-; SI-NEXT:    s_mov_b32 s17, s11
-; SI-NEXT:    s_mov_b32 s14, s2
-; SI-NEXT:    s_mov_b32 s15, s3
 ; SI-NEXT:    s_mov_b32 s18, s2
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s16, s6
+; SI-NEXT:    s_mov_b32 s17, s7
 ; SI-NEXT:    s_mov_b32 s19, s3
+; SI-NEXT:    s_mov_b32 s20, s8
+; SI-NEXT:    s_mov_b32 s21, s9
+; SI-NEXT:    s_mov_b32 s8, s10
+; SI-NEXT:    s_mov_b32 s9, s11
+; SI-NEXT:    s_mov_b32 s22, s2
+; SI-NEXT:    s_mov_b32 s23, s3
 ; SI-NEXT:    s_mov_b32 s10, s2
 ; SI-NEXT:    s_mov_b32 s11, s3
-; SI-NEXT:    buffer_load_dword v0, off, s[20:23], 0
-; SI-NEXT:    buffer_load_dword v1, off, s[8:11], 0
+; SI-NEXT:    s_mov_b32 s14, s2
+; SI-NEXT:    s_mov_b32 s15, s3
+; SI-NEXT:    buffer_load_dword v0, off, s[16:19], 0
+; SI-NEXT:    buffer_load_dword v1, off, s[20:23], 0
 ; SI-NEXT:    buffer_load_dword v2, off, s[12:15], 0
-; SI-NEXT:    buffer_load_dword v3, off, s[16:19], 0
+; SI-NEXT:    buffer_load_dword v3, off, s[8:11], 0
 ; SI-NEXT:    s_mov_b32 s0, s4
 ; SI-NEXT:    s_mov_b32 s1, s5
 ; SI-NEXT:    s_waitcnt vmcnt(3)
@@ -450,7 +472,7 @@ define amdgpu_kernel void @select_v2f16(
 ; VI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x44
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s14, s2
+; VI-NEXT:    s_mov_b32 s18, s2
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_mov_b32 s0, s4
 ; VI-NEXT:    s_mov_b32 s1, s5
@@ -458,17 +480,19 @@ define amdgpu_kernel void @select_v2f16(
 ; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s6, s2
 ; VI-NEXT:    s_mov_b32 s7, s3
-; VI-NEXT:    s_mov_b32 s16, s10
-; VI-NEXT:    s_mov_b32 s17, s11
-; VI-NEXT:    s_mov_b32 s15, s3
-; VI-NEXT:    s_mov_b32 s18, s2
+; VI-NEXT:    s_mov_b32 s16, s8
+; VI-NEXT:    s_mov_b32 s17, s9
+; VI-NEXT:    s_mov_b32 s8, s10
+; VI-NEXT:    s_mov_b32 s9, s11
 ; VI-NEXT:    s_mov_b32 s19, s3
 ; VI-NEXT:    s_mov_b32 s10, s2
 ; VI-NEXT:    s_mov_b32 s11, s3
+; VI-NEXT:    s_mov_b32 s14, s2
+; VI-NEXT:    s_mov_b32 s15, s3
 ; VI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
-; VI-NEXT:    buffer_load_dword v1, off, s[8:11], 0
+; VI-NEXT:    buffer_load_dword v1, off, s[16:19], 0
 ; VI-NEXT:    buffer_load_dword v2, off, s[12:15], 0
-; VI-NEXT:    buffer_load_dword v3, off, s[16:19], 0
+; VI-NEXT:    buffer_load_dword v3, off, s[8:11], 0
 ; VI-NEXT:    s_waitcnt vmcnt(3)
 ; VI-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
 ; VI-NEXT:    s_waitcnt vmcnt(2)
@@ -509,17 +533,19 @@ define amdgpu_kernel void @select_v2f16_imm_a(
 ; SI-NEXT:    s_mov_b32 s14, s10
 ; SI-NEXT:    s_mov_b32 s15, s11
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
-; SI-NEXT:    s_mov_b32 s16, s2
-; SI-NEXT:    s_mov_b32 s17, s3
-; SI-NEXT:    s_mov_b32 s18, s10
-; SI-NEXT:    s_mov_b32 s19, s11
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s16, s4
+; SI-NEXT:    s_mov_b32 s17, s5
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s13, s3
 ; SI-NEXT:    s_mov_b32 s6, s10
 ; SI-NEXT:    s_mov_b32 s7, s11
-; SI-NEXT:    buffer_load_dword v0, off, s[16:19], 0
-; SI-NEXT:    buffer_load_dword v1, off, s[4:7], 0
-; SI-NEXT:    buffer_load_dword v2, off, s[12:15], 0
+; SI-NEXT:    s_mov_b32 s18, s10
+; SI-NEXT:    s_mov_b32 s19, s11
+; SI-NEXT:    buffer_load_dword v0, off, s[12:15], 0
+; SI-NEXT:    buffer_load_dword v1, off, s[16:19], 0
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
 ; SI-NEXT:    s_mov_b32 s2, 0x3f200000
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
@@ -556,17 +582,19 @@ define amdgpu_kernel void @select_v2f16_imm_a(
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_mov_b32 s8, s0
 ; VI-NEXT:    s_mov_b32 s9, s1
-; VI-NEXT:    s_mov_b32 s12, s6
-; VI-NEXT:    s_mov_b32 s13, s7
 ; VI-NEXT:    s_mov_b32 s0, s2
 ; VI-NEXT:    s_mov_b32 s1, s3
+; VI-NEXT:    s_mov_b32 s12, s4
+; VI-NEXT:    s_mov_b32 s13, s5
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s2, s10
 ; VI-NEXT:    s_mov_b32 s3, s11
 ; VI-NEXT:    s_mov_b32 s6, s10
 ; VI-NEXT:    s_mov_b32 s7, s11
 ; VI-NEXT:    buffer_load_dword v0, off, s[0:3], 0
-; VI-NEXT:    buffer_load_dword v1, off, s[4:7], 0
-; VI-NEXT:    buffer_load_dword v2, off, s[12:15], 0
+; VI-NEXT:    buffer_load_dword v1, off, s[12:15], 0
+; VI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
 ; VI-NEXT:    s_movk_i32 s0, 0x3900
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -604,17 +632,19 @@ define amdgpu_kernel void @select_v2f16_imm_b(
 ; SI-NEXT:    s_mov_b32 s14, s10
 ; SI-NEXT:    s_mov_b32 s15, s11
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
-; SI-NEXT:    s_mov_b32 s16, s2
-; SI-NEXT:    s_mov_b32 s17, s3
-; SI-NEXT:    s_mov_b32 s18, s10
-; SI-NEXT:    s_mov_b32 s19, s11
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s16, s4
+; SI-NEXT:    s_mov_b32 s17, s5
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s13, s3
 ; SI-NEXT:    s_mov_b32 s6, s10
 ; SI-NEXT:    s_mov_b32 s7, s11
-; SI-NEXT:    buffer_load_dword v0, off, s[16:19], 0
-; SI-NEXT:    buffer_load_dword v1, off, s[4:7], 0
-; SI-NEXT:    buffer_load_dword v2, off, s[12:15], 0
+; SI-NEXT:    s_mov_b32 s18, s10
+; SI-NEXT:    s_mov_b32 s19, s11
+; SI-NEXT:    buffer_load_dword v0, off, s[12:15], 0
+; SI-NEXT:    buffer_load_dword v1, off, s[16:19], 0
+; SI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
 ; SI-NEXT:    s_mov_b32 s2, 0x3f200000
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
@@ -651,17 +681,19 @@ define amdgpu_kernel void @select_v2f16_imm_b(
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_mov_b32 s8, s0
 ; VI-NEXT:    s_mov_b32 s9, s1
-; VI-NEXT:    s_mov_b32 s12, s6
-; VI-NEXT:    s_mov_b32 s13, s7
 ; VI-NEXT:    s_mov_b32 s0, s2
 ; VI-NEXT:    s_mov_b32 s1, s3
+; VI-NEXT:    s_mov_b32 s12, s4
+; VI-NEXT:    s_mov_b32 s13, s5
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s2, s10
 ; VI-NEXT:    s_mov_b32 s3, s11
 ; VI-NEXT:    s_mov_b32 s6, s10
 ; VI-NEXT:    s_mov_b32 s7, s11
 ; VI-NEXT:    buffer_load_dword v0, off, s[0:3], 0
-; VI-NEXT:    buffer_load_dword v1, off, s[4:7], 0
-; VI-NEXT:    buffer_load_dword v2, off, s[12:15], 0
+; VI-NEXT:    buffer_load_dword v1, off, s[12:15], 0
+; VI-NEXT:    buffer_load_dword v2, off, s[4:7], 0
 ; VI-NEXT:    s_movk_i32 s0, 0x3900
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -696,20 +728,22 @@ define amdgpu_kernel void @select_v2f16_imm_c(
 ; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s14, s10
+; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s16, s4
+; SI-NEXT:    s_mov_b32 s17, s5
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s13, s3
 ; SI-NEXT:    s_mov_b32 s18, s10
 ; SI-NEXT:    s_mov_b32 s19, s11
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
 ; SI-NEXT:    s_mov_b32 s6, s10
 ; SI-NEXT:    s_mov_b32 s7, s11
-; SI-NEXT:    s_mov_b32 s16, s2
-; SI-NEXT:    s_mov_b32 s17, s3
-; SI-NEXT:    s_mov_b32 s14, s10
-; SI-NEXT:    s_mov_b32 s15, s11
-; SI-NEXT:    buffer_load_dword v0, off, s[16:19], 0
-; SI-NEXT:    buffer_load_dword v1, off, s[12:15], 0
-; SI-NEXT:    buffer_load_dword v3, off, s[4:7], 0
+; SI-NEXT:    buffer_load_dword v0, off, s[12:15], 0
+; SI-NEXT:    buffer_load_dword v1, off, s[4:7], 0
+; SI-NEXT:    buffer_load_dword v3, off, s[16:19], 0
 ; SI-NEXT:    v_mov_b32_e32 v2, 0x3f200000
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
@@ -745,17 +779,19 @@ define amdgpu_kernel void @select_v2f16_imm_c(
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_mov_b32 s8, s0
 ; VI-NEXT:    s_mov_b32 s9, s1
-; VI-NEXT:    s_mov_b32 s12, s6
-; VI-NEXT:    s_mov_b32 s13, s7
 ; VI-NEXT:    s_mov_b32 s0, s2
 ; VI-NEXT:    s_mov_b32 s1, s3
-; VI-NEXT:    s_mov_b32 s6, s10
-; VI-NEXT:    s_mov_b32 s7, s11
+; VI-NEXT:    s_mov_b32 s12, s4
+; VI-NEXT:    s_mov_b32 s13, s5
 ; VI-NEXT:    s_mov_b32 s2, s10
 ; VI-NEXT:    s_mov_b32 s3, s11
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
+; VI-NEXT:    s_mov_b32 s6, s10
+; VI-NEXT:    s_mov_b32 s7, s11
 ; VI-NEXT:    buffer_load_dword v0, off, s[0:3], 0
-; VI-NEXT:    buffer_load_dword v1, off, s[12:15], 0
-; VI-NEXT:    buffer_load_dword v4, off, s[4:7], 0
+; VI-NEXT:    buffer_load_dword v1, off, s[4:7], 0
+; VI-NEXT:    buffer_load_dword v4, off, s[12:15], 0
 ; VI-NEXT:    v_mov_b32_e32 v2, 0x3800
 ; VI-NEXT:    v_mov_b32_e32 v3, 0x3900
 ; VI-NEXT:    s_waitcnt vmcnt(2)
@@ -791,20 +827,22 @@ define amdgpu_kernel void @select_v2f16_imm_d(
 ; SI-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s14, s10
+; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s16, s4
+; SI-NEXT:    s_mov_b32 s17, s5
+; SI-NEXT:    s_mov_b32 s12, s2
+; SI-NEXT:    s_mov_b32 s13, s3
 ; SI-NEXT:    s_mov_b32 s18, s10
 ; SI-NEXT:    s_mov_b32 s19, s11
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
 ; SI-NEXT:    s_mov_b32 s6, s10
 ; SI-NEXT:    s_mov_b32 s7, s11
-; SI-NEXT:    s_mov_b32 s16, s2
-; SI-NEXT:    s_mov_b32 s17, s3
-; SI-NEXT:    s_mov_b32 s14, s10
-; SI-NEXT:    s_mov_b32 s15, s11
-; SI-NEXT:    buffer_load_dword v0, off, s[16:19], 0
-; SI-NEXT:    buffer_load_dword v1, off, s[12:15], 0
-; SI-NEXT:    buffer_load_dword v3, off, s[4:7], 0
+; SI-NEXT:    buffer_load_dword v0, off, s[12:15], 0
+; SI-NEXT:    buffer_load_dword v1, off, s[4:7], 0
+; SI-NEXT:    buffer_load_dword v3, off, s[16:19], 0
 ; SI-NEXT:    v_mov_b32_e32 v2, 0x3f200000
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
@@ -841,17 +879,19 @@ define amdgpu_kernel void @select_v2f16_imm_d(
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_mov_b32 s8, s0
 ; VI-NEXT:    s_mov_b32 s9, s1
-; VI-NEXT:    s_mov_b32 s12, s6
-; VI-NEXT:    s_mov_b32 s13, s7
 ; VI-NEXT:    s_mov_b32 s0, s2
 ; VI-NEXT:    s_mov_b32 s1, s3
-; VI-NEXT:    s_mov_b32 s6, s10
-; VI-NEXT:    s_mov_b32 s7, s11
+; VI-NEXT:    s_mov_b32 s12, s4
+; VI-NEXT:    s_mov_b32 s13, s5
 ; VI-NEXT:    s_mov_b32 s2, s10
 ; VI-NEXT:    s_mov_b32 s3, s11
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
+; VI-NEXT:    s_mov_b32 s6, s10
+; VI-NEXT:    s_mov_b32 s7, s11
 ; VI-NEXT:    buffer_load_dword v0, off, s[0:3], 0
-; VI-NEXT:    buffer_load_dword v1, off, s[12:15], 0
-; VI-NEXT:    buffer_load_dword v4, off, s[4:7], 0
+; VI-NEXT:    buffer_load_dword v1, off, s[4:7], 0
+; VI-NEXT:    buffer_load_dword v4, off, s[12:15], 0
 ; VI-NEXT:    v_mov_b32_e32 v2, 0x3800
 ; VI-NEXT:    v_mov_b32_e32 v3, 0x3900
 ; VI-NEXT:    s_waitcnt vmcnt(2)

diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
index 11dc48ca8aa9..187caffdade9 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -163,27 +163,27 @@ define amdgpu_kernel void @sgpr_if_else_valu_cmp_phi_br(i32 addrspace(1)* %out,
 ; SI-LABEL: sgpr_if_else_valu_cmp_phi_br:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_mov_b32 s14, 0
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s10, 0
 ; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT:    ; implicit-def: $sgpr2_sgpr3
-; SI-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; SI-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; SI-NEXT:    ; implicit-def: $sgpr0_sgpr1
+; SI-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; SI-NEXT:    s_xor_b64 s[2:3], exec, s[2:3]
 ; SI-NEXT:    s_cbranch_execz BB3_2
 ; SI-NEXT:  ; %bb.1: ; %else
-; SI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
 ; SI-NEXT:    v_mov_b32_e32 v2, 0
-; SI-NEXT:    s_mov_b32 s15, 0xf000
-; SI-NEXT:    s_andn2_b64 s[0:1], s[0:1], exec
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    buffer_load_dword v1, v[1:2], s[12:15], 0 addr64
+; SI-NEXT:    buffer_load_dword v1, v[1:2], s[8:11], 0 addr64
+; SI-NEXT:    s_andn2_b64 s[0:1], s[0:1], exec
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v1
-; SI-NEXT:    s_and_b64 s[2:3], vcc, exec
-; SI-NEXT:    s_or_b64 s[2:3], s[0:1], s[2:3]
+; SI-NEXT:    s_and_b64 s[8:9], vcc, exec
+; SI-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
 ; SI-NEXT:  BB3_2: ; %Flow
-; SI-NEXT:    s_or_saveexec_b64 s[0:1], s[8:9]
-; SI-NEXT:    s_xor_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_or_saveexec_b64 s[2:3], s[2:3]
+; SI-NEXT:    s_xor_b64 exec, exec, s[2:3]
 ; SI-NEXT:    s_cbranch_execz BB3_4
 ; SI-NEXT:  ; %bb.3: ; %if
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
@@ -193,17 +193,17 @@ define amdgpu_kernel void @sgpr_if_else_valu_cmp_phi_br(i32 addrspace(1)* %out,
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
-; SI-NEXT:    s_andn2_b64 s[2:3], s[2:3], exec
+; SI-NEXT:    s_andn2_b64 s[0:1], s[0:1], exec
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; SI-NEXT:    s_and_b64 s[6:7], vcc, exec
-; SI-NEXT:    s_or_b64 s[2:3], s[2:3], s[6:7]
+; SI-NEXT:    s_or_b64 s[0:1], s[0:1], s[6:7]
 ; SI-NEXT:  BB3_4: ; %endif
-; SI-NEXT:    s_or_b64 exec, exec, s[0:1]
+; SI-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/shift-i128.ll b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
index 72bab6964434..f2077aa2a1ad 100644
--- a/llvm/test/CodeGen/AMDGPU/shift-i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
@@ -188,21 +188,21 @@ define amdgpu_kernel void @s_shl_i128_ss(i128 %lhs, i128 %rhs) {
 ; GCN-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_sub_i32 s2, 64, s8
-; GCN-NEXT:    s_sub_i32 s9, s8, 64
+; GCN-NEXT:    s_sub_i32 s9, 64, s8
+; GCN-NEXT:    s_sub_i32 s2, s8, 64
 ; GCN-NEXT:    s_lshl_b64 s[0:1], s[6:7], s8
-; GCN-NEXT:    s_lshr_b64 s[2:3], s[4:5], s2
-; GCN-NEXT:    s_or_b64 s[2:3], s[0:1], s[2:3]
-; GCN-NEXT:    s_lshl_b64 s[10:11], s[4:5], s9
-; GCN-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    s_lshr_b64 s[10:11], s[4:5], s9
+; GCN-NEXT:    s_or_b64 s[10:11], s[0:1], s[10:11]
+; GCN-NEXT:    s_lshl_b64 s[2:3], s[4:5], s2
+; GCN-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-NEXT:    v_mov_b32_e32 v1, s11
 ; GCN-NEXT:    v_cmp_lt_u32_e64 vcc, s8, 64
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s8, 0
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, v0, v1, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s10
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, s6
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v0, v1, s[0:1]
@@ -225,21 +225,21 @@ define amdgpu_kernel void @s_lshr_i128_ss(i128 %lhs, i128 %rhs) {
 ; GCN-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_sub_i32 s2, 64, s8
-; GCN-NEXT:    s_sub_i32 s9, s8, 64
+; GCN-NEXT:    s_sub_i32 s9, 64, s8
+; GCN-NEXT:    s_sub_i32 s2, s8, 64
 ; GCN-NEXT:    s_lshr_b64 s[0:1], s[4:5], s8
-; GCN-NEXT:    s_lshl_b64 s[2:3], s[6:7], s2
-; GCN-NEXT:    s_or_b64 s[2:3], s[0:1], s[2:3]
-; GCN-NEXT:    s_lshr_b64 s[10:11], s[6:7], s9
-; GCN-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    s_lshl_b64 s[10:11], s[6:7], s9
+; GCN-NEXT:    s_or_b64 s[10:11], s[0:1], s[10:11]
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[6:7], s2
+; GCN-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-NEXT:    v_mov_b32_e32 v1, s11
 ; GCN-NEXT:    v_cmp_lt_u32_e64 vcc, s8, 64
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s8, 0
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s10
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v2, s4
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
@@ -260,31 +260,31 @@ define amdgpu_kernel void @s_ashr_i128_ss(i128 %lhs, i128 %rhs) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx8 s[4:11], s[4:5], 0x0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_sub_i32 s2, 64, s8
-; GCN-NEXT:    s_sub_i32 s9, s8, 64
-; GCN-NEXT:    s_lshr_b64 s[0:1], s[4:5], s8
-; GCN-NEXT:    s_lshl_b64 s[2:3], s[6:7], s2
-; GCN-NEXT:    s_or_b64 s[2:3], s[0:1], s[2:3]
-; GCN-NEXT:    s_ashr_i64 s[10:11], s[6:7], s9
-; GCN-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    s_ashr_i64 s[0:1], s[6:7], s8
+; GCN-NEXT:    s_ashr_i32 s2, s7, 31
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    s_sub_i32 s0, s8, 64
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[6:7], s0
+; GCN-NEXT:    s_sub_i32 s0, 64, s8
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    s_lshl_b64 s[0:1], s[6:7], s0
+; GCN-NEXT:    s_lshr_b64 s[6:7], s[4:5], s8
 ; GCN-NEXT:    v_cmp_lt_u32_e64 vcc, s8, 64
+; GCN-NEXT:    s_or_b64 s[6:7], s[6:7], s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc
+; GCN-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s8, 0
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v2, s2
-; GCN-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    s_ashr_i64 s[0:1], s[6:7], s8
-; GCN-NEXT:    s_ashr_i32 s2, s7, 31
-; GCN-NEXT:    v_mov_b32_e32 v2, s2
-; GCN-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-NEXT:    v_mov_b32_e32 v4, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v2, v3, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v4, s6
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[0:1]
 ; GCN-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
@@ -298,31 +298,31 @@ define <2 x i128> @v_shl_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-LABEL: v_shl_v2i128_vv:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_sub_i32_e32 v18, vcc, 64, v8
-; GCN-NEXT:    v_lshl_b64 v[16:17], v[2:3], v8
-; GCN-NEXT:    v_lshr_b64 v[18:19], v[0:1], v18
+; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 64, v8
+; GCN-NEXT:    v_lshr_b64 v[16:17], v[0:1], v16
+; GCN-NEXT:    v_lshl_b64 v[18:19], v[2:3], v8
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[10:11]
 ; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[8:9]
 ; GCN-NEXT:    v_or_b32_e32 v11, v9, v11
 ; GCN-NEXT:    v_subrev_i32_e32 v9, vcc, 64, v8
 ; GCN-NEXT:    v_or_b32_e32 v10, v8, v10
-; GCN-NEXT:    v_or_b32_e32 v19, v17, v19
-; GCN-NEXT:    v_or_b32_e32 v18, v16, v18
+; GCN-NEXT:    v_or_b32_e32 v19, v19, v17
+; GCN-NEXT:    v_or_b32_e32 v18, v18, v16
 ; GCN-NEXT:    v_lshl_b64 v[16:17], v[0:1], v9
 ; GCN-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
 ; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
 ; GCN-NEXT:    v_cndmask_b32_e64 v9, v16, v18, s[4:5]
-; GCN-NEXT:    v_sub_i32_e64 v16, s[6:7], 64, v12
-; GCN-NEXT:    v_cndmask_b32_e64 v11, v17, v19, s[4:5]
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v9, v2, vcc
-; GCN-NEXT:    v_lshl_b64 v[9:10], v[6:7], v12
-; GCN-NEXT:    v_lshr_b64 v[16:17], v[4:5], v16
+; GCN-NEXT:    v_sub_i32_e64 v9, s[6:7], 64, v12
+; GCN-NEXT:    v_cndmask_b32_e64 v11, v17, v19, s[4:5]
+; GCN-NEXT:    v_lshr_b64 v[9:10], v[4:5], v9
+; GCN-NEXT:    v_lshl_b64 v[16:17], v[6:7], v12
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[8:9], 0, v[14:15]
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, v11, v3, vcc
-; GCN-NEXT:    v_or_b32_e32 v16, v9, v16
+; GCN-NEXT:    v_or_b32_e32 v16, v16, v9
 ; GCN-NEXT:    v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
 ; GCN-NEXT:    v_subrev_i32_e32 v9, vcc, 64, v12
-; GCN-NEXT:    v_or_b32_e32 v11, v10, v17
+; GCN-NEXT:    v_or_b32_e32 v11, v17, v10
 ; GCN-NEXT:    v_lshl_b64 v[9:10], v[4:5], v9
 ; GCN-NEXT:    v_or_b32_e32 v15, v13, v15
 ; GCN-NEXT:    v_or_b32_e32 v14, v12, v14
@@ -347,31 +347,31 @@ define <2 x i128> @v_lshr_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-LABEL: v_lshr_v2i128_vv:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_sub_i32_e32 v18, vcc, 64, v8
-; GCN-NEXT:    v_lshr_b64 v[16:17], v[0:1], v8
-; GCN-NEXT:    v_lshl_b64 v[18:19], v[2:3], v18
+; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 64, v8
+; GCN-NEXT:    v_lshl_b64 v[16:17], v[2:3], v16
+; GCN-NEXT:    v_lshr_b64 v[18:19], v[0:1], v8
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[10:11]
 ; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[8:9]
 ; GCN-NEXT:    v_or_b32_e32 v11, v9, v11
 ; GCN-NEXT:    v_subrev_i32_e32 v9, vcc, 64, v8
 ; GCN-NEXT:    v_or_b32_e32 v10, v8, v10
-; GCN-NEXT:    v_or_b32_e32 v19, v17, v19
-; GCN-NEXT:    v_or_b32_e32 v18, v16, v18
+; GCN-NEXT:    v_or_b32_e32 v19, v19, v17
+; GCN-NEXT:    v_or_b32_e32 v18, v18, v16
 ; GCN-NEXT:    v_lshr_b64 v[16:17], v[2:3], v9
 ; GCN-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
 ; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
 ; GCN-NEXT:    v_cndmask_b32_e64 v9, v16, v18, s[4:5]
-; GCN-NEXT:    v_sub_i32_e64 v16, s[6:7], 64, v12
-; GCN-NEXT:    v_cndmask_b32_e64 v11, v17, v19, s[4:5]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc
-; GCN-NEXT:    v_lshr_b64 v[9:10], v[4:5], v12
-; GCN-NEXT:    v_lshl_b64 v[16:17], v[6:7], v16
+; GCN-NEXT:    v_sub_i32_e64 v9, s[6:7], 64, v12
+; GCN-NEXT:    v_cndmask_b32_e64 v11, v17, v19, s[4:5]
+; GCN-NEXT:    v_lshl_b64 v[9:10], v[6:7], v9
+; GCN-NEXT:    v_lshr_b64 v[16:17], v[4:5], v12
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[8:9], 0, v[14:15]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v11, v1, vcc
-; GCN-NEXT:    v_or_b32_e32 v16, v9, v16
+; GCN-NEXT:    v_or_b32_e32 v16, v16, v9
 ; GCN-NEXT:    v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
 ; GCN-NEXT:    v_subrev_i32_e32 v9, vcc, 64, v12
-; GCN-NEXT:    v_or_b32_e32 v11, v10, v17
+; GCN-NEXT:    v_or_b32_e32 v11, v17, v10
 ; GCN-NEXT:    v_lshr_b64 v[9:10], v[6:7], v9
 ; GCN-NEXT:    v_or_b32_e32 v15, v13, v15
 ; GCN-NEXT:    v_or_b32_e32 v14, v12, v14
@@ -396,31 +396,31 @@ define <2 x i128> @v_ashr_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-LABEL: v_ashr_v2i128_vv:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_sub_i32_e32 v18, vcc, 64, v8
-; GCN-NEXT:    v_lshr_b64 v[16:17], v[0:1], v8
-; GCN-NEXT:    v_lshl_b64 v[18:19], v[2:3], v18
+; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 64, v8
+; GCN-NEXT:    v_lshl_b64 v[16:17], v[2:3], v16
+; GCN-NEXT:    v_lshr_b64 v[18:19], v[0:1], v8
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[10:11]
 ; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[8:9]
 ; GCN-NEXT:    v_or_b32_e32 v11, v9, v11
 ; GCN-NEXT:    v_subrev_i32_e32 v9, vcc, 64, v8
 ; GCN-NEXT:    v_or_b32_e32 v10, v8, v10
-; GCN-NEXT:    v_or_b32_e32 v19, v17, v19
-; GCN-NEXT:    v_or_b32_e32 v18, v16, v18
+; GCN-NEXT:    v_or_b32_e32 v19, v19, v17
+; GCN-NEXT:    v_or_b32_e32 v18, v18, v16
 ; GCN-NEXT:    v_ashr_i64 v[16:17], v[2:3], v9
 ; GCN-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
 ; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
 ; GCN-NEXT:    v_cndmask_b32_e64 v9, v16, v18, s[4:5]
-; GCN-NEXT:    v_sub_i32_e64 v16, s[6:7], 64, v12
-; GCN-NEXT:    v_cndmask_b32_e64 v11, v17, v19, s[4:5]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc
-; GCN-NEXT:    v_lshr_b64 v[9:10], v[4:5], v12
-; GCN-NEXT:    v_lshl_b64 v[16:17], v[6:7], v16
+; GCN-NEXT:    v_sub_i32_e64 v9, s[6:7], 64, v12
+; GCN-NEXT:    v_cndmask_b32_e64 v11, v17, v19, s[4:5]
+; GCN-NEXT:    v_lshl_b64 v[9:10], v[6:7], v9
+; GCN-NEXT:    v_lshr_b64 v[16:17], v[4:5], v12
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[8:9], 0, v[14:15]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v11, v1, vcc
-; GCN-NEXT:    v_or_b32_e32 v16, v9, v16
+; GCN-NEXT:    v_or_b32_e32 v16, v16, v9
 ; GCN-NEXT:    v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
 ; GCN-NEXT:    v_subrev_i32_e32 v9, vcc, 64, v12
-; GCN-NEXT:    v_or_b32_e32 v11, v10, v17
+; GCN-NEXT:    v_or_b32_e32 v11, v17, v10
 ; GCN-NEXT:    v_ashr_i64 v[9:10], v[6:7], v9
 ; GCN-NEXT:    v_or_b32_e32 v15, v13, v15
 ; GCN-NEXT:    v_or_b32_e32 v14, v12, v14
@@ -446,65 +446,65 @@ define <2 x i128> @v_ashr_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 define amdgpu_kernel void @s_shl_v2i128ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-LABEL: s_shl_v2i128ss:
 ; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx8 s[16:23], s[4:5], 0x8
 ; GCN-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x0
-; GCN-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x8
+; GCN-NEXT:    v_mov_b32_e32 v10, 16
 ; GCN-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[16:17], s[0:1], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[2:3], 0
-; GCN-NEXT:    s_lshl_b64 s[20:21], s[8:9], s0
-; GCN-NEXT:    s_and_b64 vcc, s[18:19], s[16:17]
-; GCN-NEXT:    s_sub_i32 s18, 64, s0
-; GCN-NEXT:    s_lshl_b64 s[16:17], s[10:11], s0
-; GCN-NEXT:    s_lshr_b64 s[18:19], s[8:9], s18
-; GCN-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
-; GCN-NEXT:    s_sub_i32 s18, s0, 64
-; GCN-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
-; GCN-NEXT:    s_lshl_b64 s[8:9], s[8:9], s18
-; GCN-NEXT:    v_mov_b32_e32 v2, s9
-; GCN-NEXT:    v_mov_b32_e32 v3, s17
+; GCN-NEXT:    s_sub_i32 s6, 64, s16
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[16:17], 64
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[18:19], 0
+; GCN-NEXT:    s_sub_i32 s4, s16, 64
+; GCN-NEXT:    s_lshr_b64 s[6:7], s[8:9], s6
+; GCN-NEXT:    s_lshl_b64 s[24:25], s[10:11], s16
+; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
+; GCN-NEXT:    s_or_b64 s[0:1], s[16:17], s[18:19]
+; GCN-NEXT:    s_lshl_b64 s[4:5], s[8:9], s4
+; GCN-NEXT:    s_or_b64 s[6:7], s[24:25], s[6:7]
+; GCN-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[0:1], 0
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
-; GCN-NEXT:    v_mov_b32_e32 v3, s11
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v2, v3, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v2, s8
-; GCN-NEXT:    v_mov_b32_e32 v4, s16
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; GCN-NEXT:    v_mov_b32_e32 v4, s10
-; GCN-NEXT:    v_mov_b32_e32 v0, s21
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v4, s[0:1]
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[4:5], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[6:7], 0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v0, v1, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s6
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-NEXT:    s_sub_i32 s6, 64, s20
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v0, v1, s[0:1]
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[20:21], 64
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[22:23], 0
+; GCN-NEXT:    s_sub_i32 s4, s20, 64
+; GCN-NEXT:    s_lshr_b64 s[6:7], s[12:13], s6
+; GCN-NEXT:    s_lshl_b64 s[10:11], s[14:15], s20
+; GCN-NEXT:    s_lshl_b64 s[4:5], s[12:13], s4
+; GCN-NEXT:    s_or_b64 s[6:7], s[10:11], s[6:7]
+; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_or_b64 s[2:3], s[20:21], s[22:23]
+; GCN-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[2:3], 0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v1, s15
+; GCN-NEXT:    v_cndmask_b32_e64 v7, v0, v1, s[2:3]
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s6
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-NEXT:    v_cndmask_b32_e64 v6, v0, v1, s[2:3]
+; GCN-NEXT:    s_lshl_b64 s[2:3], s[8:9], s16
+; GCN-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v0, vcc
-; GCN-NEXT:    v_mov_b32_e32 v0, s20
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    s_lshl_b64 s[2:3], s[12:13], s20
+; GCN-NEXT:    v_mov_b32_e32 v4, s3
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, v4, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v4, s2
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, v4, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    s_sub_i32 s2, 64, s4
-; GCN-NEXT:    s_lshl_b64 s[8:9], s[12:13], s4
-; GCN-NEXT:    s_lshl_b64 s[0:1], s[14:15], s4
-; GCN-NEXT:    s_lshr_b64 s[2:3], s[12:13], s2
-; GCN-NEXT:    s_or_b64 s[2:3], s[0:1], s[2:3]
-; GCN-NEXT:    v_mov_b32_e32 v4, s9
-; GCN-NEXT:    s_sub_i32 s0, s4, 64
-; GCN-NEXT:    v_cndmask_b32_e32 v5, 0, v4, vcc
-; GCN-NEXT:    v_mov_b32_e32 v4, s8
-; GCN-NEXT:    s_lshl_b64 s[8:9], s[12:13], s0
-; GCN-NEXT:    s_or_b64 s[0:1], s[4:5], s[6:7]
-; GCN-NEXT:    v_mov_b32_e32 v6, s9
-; GCN-NEXT:    v_mov_b32_e32 v7, s3
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[0:1], 0
-; GCN-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
-; GCN-NEXT:    v_mov_b32_e32 v7, s15
-; GCN-NEXT:    v_cndmask_b32_e64 v7, v6, v7, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v6, s8
-; GCN-NEXT:    v_mov_b32_e32 v10, s2
-; GCN-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc
-; GCN-NEXT:    v_mov_b32_e32 v10, s14
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v10, 16
-; GCN-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GCN-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
 ; GCN-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
 ; GCN-NEXT:    s_endpgm
@@ -516,65 +516,65 @@ define amdgpu_kernel void @s_shl_v2i128ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 define amdgpu_kernel void @s_lshr_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-LABEL: s_lshr_v2i128_ss:
 ; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx8 s[16:23], s[4:5], 0x8
 ; GCN-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x0
-; GCN-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x8
+; GCN-NEXT:    v_mov_b32_e32 v10, 16
 ; GCN-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[16:17], s[0:1], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[2:3], 0
-; GCN-NEXT:    s_lshr_b64 s[20:21], s[10:11], s0
-; GCN-NEXT:    s_and_b64 vcc, s[18:19], s[16:17]
-; GCN-NEXT:    s_sub_i32 s18, 64, s0
-; GCN-NEXT:    s_lshr_b64 s[16:17], s[8:9], s0
-; GCN-NEXT:    s_lshl_b64 s[18:19], s[10:11], s18
-; GCN-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
-; GCN-NEXT:    s_sub_i32 s18, s0, 64
-; GCN-NEXT:    v_mov_b32_e32 v0, s21
-; GCN-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v0, vcc
-; GCN-NEXT:    v_mov_b32_e32 v0, s20
-; GCN-NEXT:    s_lshr_b64 s[10:11], s[10:11], s18
-; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v0, vcc
-; GCN-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-NEXT:    s_sub_i32 s6, 64, s16
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[16:17], 64
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[18:19], 0
+; GCN-NEXT:    s_sub_i32 s4, s16, 64
+; GCN-NEXT:    s_lshl_b64 s[6:7], s[10:11], s6
+; GCN-NEXT:    s_lshr_b64 s[24:25], s[8:9], s16
+; GCN-NEXT:    s_or_b64 s[6:7], s[24:25], s[6:7]
+; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
+; GCN-NEXT:    s_or_b64 s[0:1], s[16:17], s[18:19]
+; GCN-NEXT:    s_lshr_b64 s[4:5], s[10:11], s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[0:1], 0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, s9
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-NEXT:    v_mov_b32_e32 v4, s16
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
-; GCN-NEXT:    v_mov_b32_e32 v4, s8
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[4:5], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[6:7], 0
-; GCN-NEXT:    s_lshr_b64 s[8:9], s[14:15], s4
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    s_sub_i32 s2, 64, s4
-; GCN-NEXT:    s_lshr_b64 s[0:1], s[12:13], s4
-; GCN-NEXT:    s_lshl_b64 s[2:3], s[14:15], s2
-; GCN-NEXT:    s_or_b64 s[2:3], s[0:1], s[2:3]
-; GCN-NEXT:    v_mov_b32_e32 v4, s9
-; GCN-NEXT:    s_sub_i32 s0, s4, 64
-; GCN-NEXT:    v_cndmask_b32_e32 v7, 0, v4, vcc
-; GCN-NEXT:    v_mov_b32_e32 v4, s8
-; GCN-NEXT:    s_lshr_b64 s[8:9], s[14:15], s0
-; GCN-NEXT:    s_or_b64 s[0:1], s[4:5], s[6:7]
-; GCN-NEXT:    v_cndmask_b32_e32 v6, 0, v4, vcc
-; GCN-NEXT:    v_mov_b32_e32 v4, s9
-; GCN-NEXT:    v_mov_b32_e32 v5, s3
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[0:1], 0
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; GCN-NEXT:    v_mov_b32_e32 v5, s13
-; GCN-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v4, s8
-; GCN-NEXT:    v_mov_b32_e32 v10, s2
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc
-; GCN-NEXT:    v_mov_b32_e32 v10, s12
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v10, 16
-; GCN-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v2, s6
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-NEXT:    s_sub_i32 s6, 64, s20
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[20:21], 64
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[22:23], 0
+; GCN-NEXT:    s_sub_i32 s4, s20, 64
+; GCN-NEXT:    s_lshl_b64 s[6:7], s[14:15], s6
+; GCN-NEXT:    s_lshr_b64 s[8:9], s[12:13], s20
+; GCN-NEXT:    s_lshr_b64 s[4:5], s[14:15], s4
+; GCN-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_or_b64 s[2:3], s[20:21], s[22:23]
+; GCN-NEXT:    v_mov_b32_e32 v2, s5
+; GCN-NEXT:    v_mov_b32_e32 v3, s7
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[2:3], 0
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v3, s13
+; GCN-NEXT:    v_cndmask_b32_e64 v5, v2, v3, s[2:3]
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-NEXT:    v_mov_b32_e32 v3, s6
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v3, s12
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v2, v3, s[2:3]
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[10:11], s16
+; GCN-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v2, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[14:15], s20
+; GCN-NEXT:    v_mov_b32_e32 v6, s3
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, v6, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v6, s2
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, v6, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
 ; GCN-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
 ; GCN-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
 ; GCN-NEXT:    s_endpgm
@@ -586,67 +586,67 @@ define amdgpu_kernel void @s_lshr_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 define amdgpu_kernel void @s_ashr_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-LABEL: s_ashr_v2i128_ss:
 ; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx8 s[16:23], s[4:5], 0x8
 ; GCN-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x0
-; GCN-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x8
 ; GCN-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s22, s11, 31
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[16:17], s[0:1], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[2:3], 0
-; GCN-NEXT:    s_ashr_i64 s[20:21], s[10:11], s0
-; GCN-NEXT:    s_and_b64 vcc, s[18:19], s[16:17]
-; GCN-NEXT:    s_sub_i32 s18, 64, s0
-; GCN-NEXT:    s_lshr_b64 s[16:17], s[8:9], s0
-; GCN-NEXT:    s_lshl_b64 s[18:19], s[10:11], s18
-; GCN-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
-; GCN-NEXT:    s_sub_i32 s18, s0, 64
-; GCN-NEXT:    v_mov_b32_e32 v0, s22
-; GCN-NEXT:    v_mov_b32_e32 v1, s21
-; GCN-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-NEXT:    v_mov_b32_e32 v1, s20
-; GCN-NEXT:    s_ashr_i64 s[10:11], s[10:11], s18
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-NEXT:    s_sub_i32 s6, 64, s16
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[16:17], 64
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[18:19], 0
+; GCN-NEXT:    s_sub_i32 s4, s16, 64
+; GCN-NEXT:    s_lshl_b64 s[6:7], s[10:11], s6
+; GCN-NEXT:    s_lshr_b64 s[24:25], s[8:9], s16
+; GCN-NEXT:    s_or_b64 s[6:7], s[24:25], s[6:7]
+; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
+; GCN-NEXT:    s_or_b64 s[0:1], s[16:17], s[18:19]
+; GCN-NEXT:    s_ashr_i64 s[4:5], s[10:11], s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[0:1], 0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, s9
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-NEXT:    v_mov_b32_e32 v4, s16
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
-; GCN-NEXT:    v_mov_b32_e32 v4, s8
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[4:5], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[6:7], 0
-; GCN-NEXT:    s_ashr_i64 s[8:9], s[14:15], s4
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    s_sub_i32 s2, 64, s4
-; GCN-NEXT:    s_ashr_i32 s10, s15, 31
-; GCN-NEXT:    s_lshr_b64 s[0:1], s[12:13], s4
-; GCN-NEXT:    s_lshl_b64 s[2:3], s[14:15], s2
-; GCN-NEXT:    s_or_b64 s[2:3], s[0:1], s[2:3]
-; GCN-NEXT:    v_mov_b32_e32 v4, s10
-; GCN-NEXT:    v_mov_b32_e32 v5, s9
-; GCN-NEXT:    s_sub_i32 s0, s4, 64
-; GCN-NEXT:    v_cndmask_b32_e32 v7, v4, v5, vcc
-; GCN-NEXT:    v_mov_b32_e32 v5, s8
-; GCN-NEXT:    s_ashr_i64 s[8:9], s[14:15], s0
-; GCN-NEXT:    s_or_b64 s[0:1], s[4:5], s[6:7]
-; GCN-NEXT:    v_cndmask_b32_e32 v6, v4, v5, vcc
-; GCN-NEXT:    v_mov_b32_e32 v4, s9
-; GCN-NEXT:    v_mov_b32_e32 v5, s3
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[0:1], 0
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; GCN-NEXT:    v_mov_b32_e32 v5, s13
-; GCN-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v4, s8
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v2, s6
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-NEXT:    s_sub_i32 s6, 64, s20
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[20:21], 64
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[22:23], 0
+; GCN-NEXT:    s_sub_i32 s4, s20, 64
+; GCN-NEXT:    s_lshl_b64 s[6:7], s[14:15], s6
+; GCN-NEXT:    s_lshr_b64 s[8:9], s[12:13], s20
+; GCN-NEXT:    s_ashr_i64 s[4:5], s[14:15], s4
+; GCN-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_or_b64 s[2:3], s[20:21], s[22:23]
+; GCN-NEXT:    v_mov_b32_e32 v2, s5
+; GCN-NEXT:    v_mov_b32_e32 v3, s7
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[2:3], 0
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v3, s13
+; GCN-NEXT:    v_cndmask_b32_e64 v5, v2, v3, s[2:3]
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-NEXT:    v_mov_b32_e32 v3, s6
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v3, s12
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v2, v3, s[2:3]
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[10:11], s16
+; GCN-NEXT:    s_ashr_i32 s4, s11, 31
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    v_mov_b32_e32 v6, s2
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[14:15], s20
+; GCN-NEXT:    s_ashr_i32 s4, s15, 31
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v2, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
+; GCN-NEXT:    v_mov_b32_e32 v6, s4
+; GCN-NEXT:    v_mov_b32_e32 v7, s3
 ; GCN-NEXT:    v_mov_b32_e32 v10, s2
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc
-; GCN-NEXT:    v_mov_b32_e32 v10, s12
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v7, v6, v7, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s[0:1]
 ; GCN-NEXT:    v_mov_b32_e32 v10, 16
 ; GCN-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
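
The shift-i128.ll hunks above only reshuffle register assignments and instruction order; both the old and new sequences implement the same two-word shift expansion: compute the s < 64 result as (hi << s) | (lo >> (64 - s)), compute the s >= 64 result as lo << (s - 64), then pick between them with v_cndmask on s < 64, with a final s == 0 fixup for the high half. A sketch of that expansion in LLVM IR for the left-shift case (function and value names are illustrative, not taken from the test):

; hedged sketch, not from the patch: the generic i128 shl expansion into
; i64 halves that the GCN sequences above implement.
define { i64, i64 } @shl_i128_parts(i64 %lo, i64 %hi, i32 %amt) {
bb:
  %s = zext i32 %amt to i64
  %inv = sub i64 64, %s                     ; s_sub_i32 sN, 64, s8
  %big = sub i64 %s, 64                     ; s_sub_i32 sN, s8, 64
  %hi.shl = shl i64 %hi, %s                 ; s_lshl_b64
  %lo.spill = lshr i64 %lo, %inv            ; s_lshr_b64
  %hi.small = or i64 %hi.shl, %lo.spill     ; s_or_b64
  %hi.big = shl i64 %lo, %big               ; s_lshl_b64, s >= 64 case
  %lt64 = icmp ult i64 %s, 64               ; v_cmp_lt_u32 ..., 64
  %is0 = icmp eq i64 %s, 0                  ; v_cmp_eq_u32 ..., 0
  %hi.sel = select i1 %lt64, i64 %hi.small, i64 %hi.big   ; v_cndmask
  %hi.res = select i1 %is0, i64 %hi, i64 %hi.sel          ; v_cndmask
  %lo.shl = shl i64 %lo, %s
  %lo.res = select i1 %lt64, i64 %lo.shl, i64 0
  ; note: in strict IR semantics the untaken shift arms can be poison for
  ; out-of-range amounts; the hardware sequence masks shift amounts, so
  ; this is a shape sketch, not a drop-in replacement.
  %r0 = insertvalue { i64, i64 } undef, i64 %lo.res, 0
  %r1 = insertvalue { i64, i64 } %r0, i64 %hi.res, 1
  ret { i64, i64 } %r1
}

The lshr and ashr variants swap the shift directions, and the ashr sequences additionally materialize the sign word (the s_ashr_i32 ..., 31 visible above) to fill the high half in the s >= 64 case.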

diff --git a/llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll b/llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll
index 2f93efec69bb..b000e9449e7e 100644
--- a/llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll
+++ b/llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll
@@ -304,12 +304,10 @@ bb:
 }
 
 ; GCN-LABEL: {{^}}trunc_shl_vec_vec:
-; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}
-; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 4, v{{[0-9]+}}
-; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
-; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 6, v{{[0-9]+}}
-; GCN-NOT: v_lshl_b64
-; GCN-NOT: v_lshlrev_b64
+; GCN-DAG: v_lshl_b64 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 3
+; GCN-DAG: v_lshl_b64 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 4
+; GCN-DAG: v_lshl_b64 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 5
+; GCN-DAG: v_lshl_b64 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 6
 define amdgpu_kernel void @trunc_shl_vec_vec(<4 x i64> addrspace(1)* %arg) {
 bb:
   %v = load <4 x i64>, <4 x i64> addrspace(1)* %arg, align 32
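
The trunc_shl_vec_vec checks flip direction here: the old pattern required the per-lane shifts to be decomposed into 32-bit pieces (v_lshlrev_b32_e32, with any v_lshl_b64 explicitly forbidden), while the new pattern requires one 64-bit shift per lane with the constant amounts 3, 4, 5 and 6. Only the first line of the function body survives the diff context, so the following reduced kernel is a guess at the shape being exercised, not the test itself:

; hypothetical reduced form; the function name and store are assumptions,
; only the load line and the shift amounts 3,4,5,6 come from the diff above.
define amdgpu_kernel void @shl_v4i64_sketch(<4 x i64> addrspace(1)* %arg) {
bb:
  %v = load <4 x i64>, <4 x i64> addrspace(1)* %arg, align 32
  %shl = shl <4 x i64> %v, <i64 3, i64 4, i64 5, i64 6>   ; four v_lshl_b64
  store <4 x i64> %shl, <4 x i64> addrspace(1)* %arg, align 32
  ret void
}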

diff --git a/llvm/test/CodeGen/AMDGPU/v_madak_f16.ll b/llvm/test/CodeGen/AMDGPU/v_madak_f16.ll
index cf0d45603218..4730d92b9f5b 100644
--- a/llvm/test/CodeGen/AMDGPU/v_madak_f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_madak_f16.ll
@@ -6,27 +6,27 @@ define amdgpu_kernel void @madak_f16(
 ; SI-LABEL: madak_f16:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; SI-NEXT:    s_mov_b32 s11, 0xf000
-; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_mov_b32 s2, s10
-; SI-NEXT:    s_mov_b32 s3, s11
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b32 s10, s2
+; SI-NEXT:    s_mov_b32 s11, s3
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s12, s6
 ; SI-NEXT:    s_mov_b32 s13, s7
-; SI-NEXT:    s_mov_b32 s14, s10
-; SI-NEXT:    s_mov_b32 s15, s11
-; SI-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
+; SI-NEXT:    s_mov_b32 s14, s2
+; SI-NEXT:    s_mov_b32 s15, s3
+; SI-NEXT:    buffer_load_ushort v0, off, s[8:11], 0
 ; SI-NEXT:    buffer_load_ushort v1, off, s[12:15], 0
-; SI-NEXT:    s_mov_b32 s8, s4
-; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
 ; SI-NEXT:    v_madak_f32 v0, v1, v0, 0x41200000
 ; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT:    buffer_store_short v0, off, s[8:11], 0
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: madak_f16:
@@ -68,38 +68,40 @@ define amdgpu_kernel void @madak_f16_use_2(
 ; SI-LABEL: madak_f16_use_2:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x11
-; SI-NEXT:    s_mov_b32 s15, 0xf000
-; SI-NEXT:    s_mov_b32 s14, -1
-; SI-NEXT:    s_mov_b32 s2, s14
+; SI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x11
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b32 s18, s2
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s16, s10
-; SI-NEXT:    s_mov_b32 s17, s11
-; SI-NEXT:    s_mov_b32 s10, s14
-; SI-NEXT:    s_mov_b32 s11, s15
-; SI-NEXT:    s_mov_b32 s3, s15
-; SI-NEXT:    s_mov_b32 s18, s14
-; SI-NEXT:    s_mov_b32 s19, s15
-; SI-NEXT:    buffer_load_ushort v0, off, s[8:11], 0
-; SI-NEXT:    buffer_load_ushort v1, off, s[16:19], 0
-; SI-NEXT:    buffer_load_ushort v2, off, s[0:3], 0
-; SI-NEXT:    v_mov_b32_e32 v3, 0x41200000
-; SI-NEXT:    s_mov_b32 s12, s6
-; SI-NEXT:    s_mov_b32 s13, s7
-; SI-NEXT:    s_mov_b32 s6, s14
-; SI-NEXT:    s_mov_b32 s7, s15
+; SI-NEXT:    s_mov_b32 s16, s8
+; SI-NEXT:    s_mov_b32 s17, s9
+; SI-NEXT:    s_mov_b32 s19, s3
+; SI-NEXT:    s_mov_b32 s8, s10
+; SI-NEXT:    s_mov_b32 s9, s11
+; SI-NEXT:    s_mov_b32 s10, s2
+; SI-NEXT:    s_mov_b32 s11, s3
+; SI-NEXT:    buffer_load_ushort v0, off, s[16:19], 0
+; SI-NEXT:    buffer_load_ushort v1, off, s[8:11], 0
+; SI-NEXT:    s_mov_b32 s14, s2
+; SI-NEXT:    s_mov_b32 s15, s3
+; SI-NEXT:    buffer_load_ushort v3, off, s[12:15], 0
+; SI-NEXT:    v_mov_b32_e32 v2, 0x41200000
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_mov_b32 s8, s6
+; SI-NEXT:    s_mov_b32 s9, s7
 ; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; SI-NEXT:    s_waitcnt vmcnt(1)
 ; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
 ; SI-NEXT:    v_madak_f32 v1, v0, v1, 0x41200000
-; SI-NEXT:    v_mac_f32_e32 v3, v0, v2
+; SI-NEXT:    v_mac_f32_e32 v2, v0, v3
 ; SI-NEXT:    v_cvt_f16_f32_e32 v0, v1
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v3
-; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
-; SI-NEXT:    buffer_store_short v1, off, s[12:15], 0
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    buffer_store_short v1, off, s[8:11], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: madak_f16_use_2:
@@ -108,29 +110,33 @@ define amdgpu_kernel void @madak_f16_use_2(
 ; VI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x44
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s14, s2
+; VI-NEXT:    s_mov_b32 s18, s2
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_mov_b32 s16, s10
-; VI-NEXT:    s_mov_b32 s17, s11
+; VI-NEXT:    s_mov_b32 s16, s8
+; VI-NEXT:    s_mov_b32 s17, s9
+; VI-NEXT:    s_mov_b32 s19, s3
+; VI-NEXT:    s_mov_b32 s8, s10
+; VI-NEXT:    s_mov_b32 s9, s11
 ; VI-NEXT:    s_mov_b32 s10, s2
 ; VI-NEXT:    s_mov_b32 s11, s3
+; VI-NEXT:    buffer_load_ushort v0, off, s[16:19], 0
+; VI-NEXT:    buffer_load_ushort v1, off, s[8:11], 0
+; VI-NEXT:    s_mov_b32 s14, s2
 ; VI-NEXT:    s_mov_b32 s15, s3
-; VI-NEXT:    s_mov_b32 s18, s2
-; VI-NEXT:    s_mov_b32 s19, s3
-; VI-NEXT:    buffer_load_ushort v0, off, s[8:11], 0
-; VI-NEXT:    buffer_load_ushort v1, off, s[16:19], 0
 ; VI-NEXT:    buffer_load_ushort v3, off, s[12:15], 0
 ; VI-NEXT:    v_mov_b32_e32 v2, 0x4900
-; VI-NEXT:    s_mov_b32 s0, s6
-; VI-NEXT:    s_mov_b32 s1, s7
+; VI-NEXT:    s_mov_b32 s0, s4
+; VI-NEXT:    s_mov_b32 s1, s5
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
 ; VI-NEXT:    s_mov_b32 s6, s2
 ; VI-NEXT:    s_mov_b32 s7, s3
 ; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_madak_f16 v1, v0, v1, 0x4900
 ; VI-NEXT:    s_waitcnt vmcnt(0)
 ; VI-NEXT:    v_mac_f16_e32 v2, v0, v3
-; VI-NEXT:    buffer_store_short v1, off, s[4:7], 0
-; VI-NEXT:    buffer_store_short v2, off, s[0:3], 0
+; VI-NEXT:    buffer_store_short v1, off, s[0:3], 0
+; VI-NEXT:    buffer_store_short v2, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
     half addrspace(1)* %r0,
     half addrspace(1)* %r1,

diff --git a/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll b/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
index 9c7b2fcb8c9a..28fb550c44af 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
@@ -158,14 +158,17 @@ bb15:                                             ; preds = %.preheader
 ; OPT:  %bc = bitcast <6 x double> %0 to <6 x i64>
 ; OPT:  %1 = extractelement <6 x i64> %bc, i32 %tmp20
 
-; TODO: Fix selection to eliminate scratch
-
 ; GCN-LABEL: {{^}}vector_write_read_bitcast_to_double:
-; GCN-COUNT-2: buffer_store_dword
+
+; GCN-ALLOCA-COUNT-2: buffer_store_dword
+; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32
 
 ; GCN: s_cbranch
 
-; GCN-COUNT-2: buffer_load_dword
+; GCN-ALLOCA-COUNT-2: buffer_load_dword
+; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32
+
+; GCN-PROMOTE: ScratchSize: 0
 
 define amdgpu_kernel void @vector_write_read_bitcast_to_double(double addrspace(1)* %arg) {
 bb:
@@ -220,14 +223,17 @@ bb15:                                             ; preds = %.preheader
 ; OPT: .preheader:
 ; OPT:  %1 = extractelement <6 x i64> %0, i32 %tmp18
 
-; TODO: Fix selection to eliminate scratch
-
 ; GCN-LABEL: {{^}}vector_write_read_bitcast_to_i64:
-; GCN-COUNT-2: buffer_store_dword
+
+; GCN-ALLOCA-COUNT-2: buffer_store_dword
+; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32
 
 ; GCN: s_cbranch
 
-; GCN-COUNT-2: buffer_load_dword
+; GCN-ALLOCA-COUNT-2: buffer_load_dword
+; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32
+
+; GCN-PROMOTE: ScratchSize: 0
 
 define amdgpu_kernel void @vector_write_read_bitcast_to_i64(i64 addrspace(1)* %arg) {
 bb:
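
These last two hunks carry the behavioural payload of the test updates: the GCN-ALLOCA run line still expects the dynamic element accesses to round-trip through scratch (buffer_store_dword / buffer_load_dword, two of each), while GCN-PROMOTE now expects indexed register moves instead — two v_movreld_b32_e32 for the write and two v_movrels_b32_e32 for the read, one per dword of the 64-bit element — plus a ScratchSize: 0 line. The test bodies are elided here; going by the OPT checks, which show a <6 x double> vector and a dynamic extractelement, the pattern is roughly of this shape (names invented, the loop and s_cbranch structure of the real test omitted):

; illustrative only; nothing below is copied from the test file.
define amdgpu_kernel void @dyn_rw_double_sketch(double addrspace(1)* %out, i32 %idx) {
bb:
  %tmp = alloca [6 x double], align 16, addrspace(5)
  %gep = getelementptr inbounds [6 x double], [6 x double] addrspace(5)* %tmp, i32 0, i32 %idx
  store double 4.0, double addrspace(5)* %gep   ; dynamic write: 2x v_movreld_b32
  %val = load double, double addrspace(5)* %gep ; dynamic read:  2x v_movrels_b32
  store double %val, double addrspace(1)* %out
  ret void
}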
