[llvm] 7386ad4 - Revert "[AMDGPU][GlobalISel] Stop foldInsertEltToCmpSelect from changing reg banks"

Mirko Brkusanin via llvm-commits <llvm-commits@lists.llvm.org>
Wed May 26 02:59:55 PDT 2021


Author: Mirko Brkusanin
Date: 2021-05-26T11:57:41+02:00
New Revision: 7386ad4e9e26df93876ae309920e4f7e72288bae

URL: https://github.com/llvm/llvm-project/commit/7386ad4e9e26df93876ae309920e4f7e72288bae
DIFF: https://github.com/llvm/llvm-project/commit/7386ad4e9e26df93876ae309920e4f7e72288bae.diff

LOG: Revert "[AMDGPU][GlobalISel] Stop foldInsertEltToCmpSelect from changing reg banks"

This reverts commit 18c5444702893fd63b0a99ec7133dd714284f9d2.
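
For context, the change being reverted made foldInsertEltToCmpSelect route each
G_SELECT value operand through a constrainRegToBank helper, which inserted a
cross-bank COPY whenever a register already carried a bank different from the
one being assigned. The revert restores the original lowering, which builds the
select first and then stamps the destination bank directly onto the result and
both value operands. A condensed sketch of the restored per-lane loop, lifted
from the AMDGPURegisterBankInfo.cpp hunk below (I, NumLanes, InsRegs,
UnmergeToEltTy, Cmp, EltTy, and DstBank are the surrounding locals in
foldInsertEltToCmpSelect):

  // Build the G_SELECT, then force DstBank onto the result (operand 0) and
  // both value operands (operands 2 and 3), overwriting any bank they
  // already had.
  for (unsigned L = 0; L < NumLanes; ++L) {
    auto S = B.buildSelect(EltTy, Cmp, InsRegs[L],
                           UnmergeToEltTy.getReg(I * NumLanes + L));
    for (unsigned N : {0, 2, 3})
      MRI.setRegBank(S->getOperand(N).getReg(), DstBank);
    Ops[I * NumLanes + L] = S->getOperand(0).getReg();
  }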

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-insert-vector-elt.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 770a9f764188c..1ebc6892a7667 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1977,22 +1977,6 @@ bool AMDGPURegisterBankInfo::foldExtractEltToCmpSelect(
   return true;
 }
 
-// Insert a cross regbank copy for a register if it already has a bank that
-// differs from the one we want to set.
-static Register constrainRegToBank(MachineRegisterInfo &MRI,
-                                   MachineIRBuilder &B, Register &Reg,
-                                   const RegisterBank &Bank) {
-  const RegisterBank *CurrBank = MRI.getRegBankOrNull(Reg);
-  if (CurrBank && *CurrBank != Bank) {
-    Register Copy = B.buildCopy(MRI.getType(Reg), Reg).getReg(0);
-    MRI.setRegBank(Copy, Bank);
-    return Copy;
-  }
-
-  MRI.setRegBank(Reg, Bank);
-  return Reg;
-}
-
 bool AMDGPURegisterBankInfo::foldInsertEltToCmpSelect(
   MachineInstr &MI, MachineRegisterInfo &MRI,
   const OperandsMapper &OpdMapper) const {
@@ -2056,14 +2040,13 @@ bool AMDGPURegisterBankInfo::foldInsertEltToCmpSelect(
     MRI.setRegBank(Cmp->getOperand(0).getReg(), CCBank);
 
     for (unsigned L = 0; L < NumLanes; ++L) {
-      Register Op0 = constrainRegToBank(MRI, B, InsRegs[L], DstBank);
-      Register Op1 = UnmergeToEltTy.getReg(I * NumLanes + L);
-      Op1 = constrainRegToBank(MRI, B, Op1, DstBank);
+      auto S = B.buildSelect(EltTy, Cmp, InsRegs[L],
+                             UnmergeToEltTy.getReg(I * NumLanes + L));
 
-      Register Select = B.buildSelect(EltTy, Cmp, Op0, Op1).getReg(0);
-      MRI.setRegBank(Select, DstBank);
+      for (unsigned N : { 0, 2, 3 })
+        MRI.setRegBank(S->getOperand(N).getReg(), DstBank);
 
-      Ops[I * NumLanes + L] = Select;
+      Ops[I * NumLanes + L] = S->getOperand(0).getReg();
     }
   }
 

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
index 76b83e3c08ee5..adf7a49ae0c73 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
@@ -7,18 +7,18 @@
 define amdgpu_ps void @insertelement_s_v2i8_s_s(<2 x i8> addrspace(4)* inreg %ptr, i8 inreg %val, i32 inreg %idx) {
 ; GFX9-LABEL: insertelement_s_v2i8_s_s:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0
-; GFX9-NEXT:    global_load_ushort v0, v0, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_ushort v1, v1, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 8, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 8, v1
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 1
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
-; GFX9-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GFX9-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
-; GFX9-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX9-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT:    v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX9-NEXT:    global_store_short v[0:1], v2, off
@@ -29,13 +29,13 @@ define amdgpu_ps void @insertelement_s_v2i8_s_s(<2 x i8> addrspace(4)* inreg %pt
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX8-NEXT:    flat_load_ushort v0, v[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v1, s4
+; GFX8-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 8, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 1
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -50,35 +50,36 @@ define amdgpu_ps void @insertelement_s_v2i8_s_s(<2 x i8> addrspace(4)* inreg %pt
 ; GFX7-NEXT:    s_mov_b32 s1, s3
 ; GFX7-NEXT:    s_mov_b32 s2, -1
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
-; GFX7-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
-; GFX7-NEXT:    v_mov_b32_e32 v2, s4
+; GFX7-NEXT:    buffer_load_ushort v1, off, s[0:3], 0
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX7-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
-; GFX7-NEXT:    v_mov_b32_e32 v1, 0xff
+; GFX7-NEXT:    v_mov_b32_e32 v2, 0xff
 ; GFX7-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_lshrrev_b32_e32 v3, 8, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX7-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 1
-; GFX7-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
-; GFX7-NEXT:    v_and_b32_e32 v0, v0, v1
-; GFX7-NEXT:    v_and_b32_e32 v1, v2, v1
-; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX7-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_and_b32_e32 v1, v1, v2
+; GFX7-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; GFX7-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX7-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; GFX7-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: insertelement_s_v2i8_s_s:
 ; GFX10:       ; %bb.0:
-; GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, s5, 1
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, s5, 0
-; GFX10-NEXT:    global_load_ushort v0, v0, s[2:3]
-; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s4, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, s0
+; GFX10-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-NEXT:    v_mov_b32_e32 v0, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s5, 1
 ; GFX10-NEXT:    s_movk_i32 s0, 0xff
-; GFX10-NEXT:    v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT:    global_load_ushort v1, v1, s[2:3]
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_lshrrev_b32_e32 v2, 8, v1
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v0, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s5, 0
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT:    v_and_b32_sdwa v1, v2, s0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX10-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, 0
@@ -94,13 +95,13 @@ define amdgpu_ps void @insertelement_v_v2i8_s_s(<2 x i8> addrspace(1)* %ptr, i8
 ; GFX9-LABEL: insertelement_v_v2i8_s_s:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    global_load_ushort v0, v[0:1], off
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 8, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 1
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX9-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX9-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -112,13 +113,13 @@ define amdgpu_ps void @insertelement_v_v2i8_s_s(<2 x i8> addrspace(1)* %ptr, i8
 ; GFX8-LABEL: insertelement_v_v2i8_s_s:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    flat_load_ushort v0, v[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v1, s2
+; GFX8-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 8, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 1
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -152,14 +153,15 @@ define amdgpu_ps void @insertelement_v_v2i8_s_s(<2 x i8> addrspace(1)* %ptr, i8
 ; GFX10-LABEL: insertelement_v_v2i8_s_s:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    global_load_ushort v0, v[0:1], off
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, s3, 1
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, s3, 0
+; GFX10-NEXT:    v_mov_b32_e32 v2, s2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 1
+; GFX10-NEXT:    s_movk_i32 s0, 0xff
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s2, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s2, s0
-; GFX10-NEXT:    s_movk_i32 s0, 0xff
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 0
 ; GFX10-NEXT:    v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc_lo
 ; GFX10-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, 0
@@ -257,18 +259,18 @@ define amdgpu_ps void @insertelement_s_v2i8_v_s(<2 x i8> addrspace(4)* inreg %pt
 define amdgpu_ps void @insertelement_s_v2i8_s_v(<2 x i8> addrspace(4)* inreg %ptr, i8 inreg %val, i32 %idx) {
 ; GFX9-LABEL: insertelement_s_v2i8_s_v:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_load_ushort v1, v1, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    global_load_ushort v2, v2, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 8, v2
+; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v1, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xff, v0
 ; GFX9-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
-; GFX9-NEXT:    v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    v_or_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX9-NEXT:    global_store_short v[0:1], v2, off
@@ -279,13 +281,13 @@ define amdgpu_ps void @insertelement_s_v2i8_s_v(<2 x i8> addrspace(4)* inreg %pt
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX8-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX8-NEXT:    flat_load_ushort v1, v[1:2]
-; GFX8-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8-NEXT:    v_mov_b32_e32 v3, s4
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 8, v1
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xff, v0
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
 ; GFX8-NEXT:    v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -300,18 +302,18 @@ define amdgpu_ps void @insertelement_s_v2i8_s_v(<2 x i8> addrspace(4)* inreg %pt
 ; GFX7-NEXT:    s_mov_b32 s1, s3
 ; GFX7-NEXT:    s_mov_b32 s2, -1
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
-; GFX7-NEXT:    buffer_load_ushort v1, off, s[0:3], 0
-; GFX7-NEXT:    v_mov_b32_e32 v3, s4
+; GFX7-NEXT:    buffer_load_ushort v2, off, s[0:3], 0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7-NEXT:    v_mov_b32_e32 v2, 0xff
+; GFX7-NEXT:    v_mov_b32_e32 v3, 0xff
 ; GFX7-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_lshrrev_b32_e32 v4, 8, v1
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX7-NEXT:    v_lshrrev_b32_e32 v4, 8, v2
+; GFX7-NEXT:    v_cndmask_b32_e32 v2, v2, v1, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v0, v4, v3, vcc
-; GFX7-NEXT:    v_and_b32_e32 v0, v0, v2
-; GFX7-NEXT:    v_and_b32_e32 v1, v1, v2
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX7-NEXT:    v_and_b32_e32 v0, v0, v3
+; GFX7-NEXT:    v_and_b32_e32 v1, v2, v3
 ; GFX7-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
 ; GFX7-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX7-NEXT:    buffer_store_short v0, off, s[0:3], 0
@@ -319,16 +321,17 @@ define amdgpu_ps void @insertelement_s_v2i8_s_v(<2 x i8> addrspace(4)* inreg %pt
 ;
 ; GFX10-LABEL: insertelement_s_v2i8_s_v:
 ; GFX10:       ; %bb.0:
-; GFX10-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX10-NEXT:    s_movk_i32 s0, 0xff
-; GFX10-NEXT:    global_load_ushort v1, v1, s[2:3]
+; GFX10-NEXT:    global_load_ushort v2, v2, s[2:3]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_lshrrev_b32_e32 v2, 8, v1
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s4, vcc_lo
+; GFX10-NEXT:    v_lshrrev_b32_e32 v3, 8, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v1, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s4, vcc_lo
-; GFX10-NEXT:    v_and_b32_sdwa v1, v2, s0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc_lo
+; GFX10-NEXT:    v_and_b32_sdwa v1, v3, s0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX10-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, 0
@@ -427,13 +430,13 @@ define amdgpu_ps void @insertelement_v_v2i8_s_v(<2 x i8> addrspace(1)* %ptr, i8
 ; GFX9-LABEL: insertelement_v_v2i8_s_v:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    global_load_ushort v0, v[0:1], off
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s2
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 8, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX9-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX9-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -445,13 +448,13 @@ define amdgpu_ps void @insertelement_v_v2i8_s_v(<2 x i8> addrspace(1)* %ptr, i8
 ; GFX8-LABEL: insertelement_v_v2i8_s_v:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    flat_load_ushort v0, v[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v1, s2
+; GFX8-NEXT:    v_mov_b32_e32 v3, s2
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 8, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -462,14 +465,14 @@ define amdgpu_ps void @insertelement_v_v2i8_s_v(<2 x i8> addrspace(1)* %ptr, i8
 ;
 ; GFX7-LABEL: insertelement_v_v2i8_s_v:
 ; GFX7:       ; %bb.0:
-; GFX7-NEXT:    s_mov_b32 s6, 0
-; GFX7-NEXT:    s_mov_b32 s7, 0xf000
-; GFX7-NEXT:    s_mov_b64 s[4:5], 0
-; GFX7-NEXT:    buffer_load_ushort v0, v[0:1], s[4:7], 0 addr64
 ; GFX7-NEXT:    v_mov_b32_e32 v3, s2
+; GFX7-NEXT:    s_mov_b32 s2, 0
+; GFX7-NEXT:    s_mov_b32 s3, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[0:1], 0
+; GFX7-NEXT:    buffer_load_ushort v0, v[0:1], s[0:3], 0 addr64
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
 ; GFX7-NEXT:    v_mov_b32_e32 v1, 0xff
-; GFX7-NEXT:    s_mov_b32 s6, -1
+; GFX7-NEXT:    s_mov_b32 s2, -1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v4, 8, v0
 ; GFX7-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
@@ -479,20 +482,21 @@ define amdgpu_ps void @insertelement_v_v2i8_s_v(<2 x i8> addrspace(1)* %ptr, i8
 ; GFX7-NEXT:    v_and_b32_e32 v1, v2, v1
 ; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
 ; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; GFX7-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; GFX7-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: insertelement_v_v2i8_s_v:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    global_load_ushort v0, v[0:1], off
+; GFX10-NEXT:    v_mov_b32_e32 v3, s2
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
 ; GFX10-NEXT:    s_movk_i32 s0, 0xff
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT:    v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc_lo
 ; GFX10-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, 0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
index 562a4678e36d6..b5ccf4708ae5b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; XUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GPRIDX %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GPRIDX %s
 ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=MOVREL %s
-; XUN: not --crash llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not --crash llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck -check-prefix=ERR %s
 
 ; FIXME: Need constant bus fixup pre-gfx10 for movrel
 ; ERR: Bad machine code: VOP* instruction violates constant bus restriction
@@ -192,8 +192,8 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_s_v(<8 x float> inreg %v
 ; GPRIDX-NEXT:    s_mov_b32 s4, s6
 ; GPRIDX-NEXT:    s_mov_b32 s6, s8
 ; GPRIDX-NEXT:    v_mov_b32_e32 v15, s7
-; GPRIDX-NEXT:    v_mov_b32_e32 v8, s0
 ; GPRIDX-NEXT:    v_mov_b32_e32 v7, s10
+; GPRIDX-NEXT:    v_mov_b32_e32 v8, s0
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v7, vcc
 ; GPRIDX-NEXT:    v_mov_b32_e32 v9, s1
@@ -230,30 +230,31 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_s_v(<8 x float> inreg %v
 ; MOVREL-NEXT:    s_mov_b32 s4, s6
 ; MOVREL-NEXT:    s_mov_b32 s6, s8
 ; MOVREL-NEXT:    v_mov_b32_e32 v15, s7
+; MOVREL-NEXT:    v_mov_b32_e32 v7, s10
 ; MOVREL-NEXT:    v_mov_b32_e32 v8, s0
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
 ; MOVREL-NEXT:    v_mov_b32_e32 v9, s1
 ; MOVREL-NEXT:    v_mov_b32_e32 v10, s2
 ; MOVREL-NEXT:    v_mov_b32_e32 v11, s3
 ; MOVREL-NEXT:    v_mov_b32_e32 v12, s4
-; MOVREL-NEXT:    v_cndmask_b32_e64 v8, v8, s10, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v8, v8, v7, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; MOVREL-NEXT:    v_mov_b32_e32 v13, s5
 ; MOVREL-NEXT:    v_mov_b32_e32 v14, s6
-; MOVREL-NEXT:    v_cndmask_b32_e64 v1, v9, s10, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v9, v7, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; MOVREL-NEXT:    v_cndmask_b32_e64 v2, v10, s10, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v10, v7, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; MOVREL-NEXT:    v_cndmask_b32_e64 v3, v11, s10, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v3, v11, v7, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; MOVREL-NEXT:    v_cndmask_b32_e64 v4, v12, s10, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v4, v12, v7, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; MOVREL-NEXT:    v_cndmask_b32_e64 v5, v13, s10, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v5, v13, v7, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; MOVREL-NEXT:    v_cndmask_b32_e64 v6, v14, s10, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v6, v14, v7, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
 ; MOVREL-NEXT:    v_mov_b32_e32 v0, v8
-; MOVREL-NEXT:    v_cndmask_b32_e64 v7, v15, s10, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v7, v15, v7, vcc_lo
 ; MOVREL-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <8 x float> %vec, float %val, i32 %idx
@@ -363,22 +364,23 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_s_s(<8 x float> %vec, fl
 ;
 ; MOVREL-LABEL: dyn_insertelement_v8f32_v_s_s:
 ; MOVREL:       ; %bb.0: ; %entry
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 0
-; MOVREL-NEXT:    v_cndmask_b32_e64 v0, v0, s2, s0
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 1
-; MOVREL-NEXT:    v_cndmask_b32_e64 v1, v1, s2, s0
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 2
-; MOVREL-NEXT:    v_cndmask_b32_e64 v2, v2, s2, s0
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 3
-; MOVREL-NEXT:    v_cndmask_b32_e64 v3, v3, s2, s0
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 4
-; MOVREL-NEXT:    v_cndmask_b32_e64 v4, v4, s2, s0
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 5
-; MOVREL-NEXT:    v_cndmask_b32_e64 v5, v5, s2, s0
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 6
-; MOVREL-NEXT:    v_cndmask_b32_e64 v6, v6, s2, s0
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 7
-; MOVREL-NEXT:    v_cndmask_b32_e64 v7, v7, s2, s0
+; MOVREL-NEXT:    v_mov_b32_e32 v8, s2
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 0
+; MOVREL-NEXT:    v_cndmask_b32_e32 v0, v0, v8, vcc_lo
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 1
+; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 2
+; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 3
+; MOVREL-NEXT:    v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 4
+; MOVREL-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 5
+; MOVREL-NEXT:    v_cndmask_b32_e32 v5, v5, v8, vcc_lo
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 6
+; MOVREL-NEXT:    v_cndmask_b32_e32 v6, v6, v8, vcc_lo
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 7
+; MOVREL-NEXT:    v_cndmask_b32_e32 v7, v7, v8, vcc_lo
 ; MOVREL-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <8 x float> %vec, float %val, i32 %idx
@@ -490,22 +492,23 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_s_v(<8 x float> %vec, fl
 ;
 ; MOVREL-LABEL: dyn_insertelement_v8f32_v_s_v:
 ; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    v_mov_b32_e32 v9, s2
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v8
-; MOVREL-NEXT:    v_cndmask_b32_e64 v0, v0, s2, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v0, v0, v9, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v8
-; MOVREL-NEXT:    v_cndmask_b32_e64 v1, v1, s2, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v8
-; MOVREL-NEXT:    v_cndmask_b32_e64 v2, v2, s2, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v9, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v8
-; MOVREL-NEXT:    v_cndmask_b32_e64 v3, v3, s2, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v8
-; MOVREL-NEXT:    v_cndmask_b32_e64 v4, v4, s2, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v4, v4, v9, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v8
-; MOVREL-NEXT:    v_cndmask_b32_e64 v5, v5, s2, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v8
-; MOVREL-NEXT:    v_cndmask_b32_e64 v6, v6, s2, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v6, v6, v9, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v8
-; MOVREL-NEXT:    v_cndmask_b32_e64 v7, v7, s2, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e32 v7, v7, v9, vcc_lo
 ; MOVREL-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <8 x float> %vec, float %val, i32 %idx

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-insert-vector-elt.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-insert-vector-elt.mir
index a77b2c8bdf2f0..695b81cb63082 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-insert-vector-elt.mir
@@ -46,26 +46,22 @@ body: |
 
     ; CHECK-LABEL: name: insert_vector_elt_v4i32_v_s_s
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
-    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
     ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
     ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
     ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
     ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C]]
-    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY4]], [[UV]]
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY1]], [[UV]]
     ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
     ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C1]]
-    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY5]], [[UV1]]
+    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY1]], [[UV1]]
     ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
     ; CHECK: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C2]]
-    ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY6]], [[UV2]]
+    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY1]], [[UV2]]
     ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 3
     ; CHECK: [[ICMP3:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C3]]
-    ; CHECK: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY7]], [[UV3]]
+    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY1]], [[UV3]]
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<4 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32), [[SELECT3]](s32)
     ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
     %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
@@ -123,26 +119,22 @@ body: |
     ; CHECK-LABEL: name: insert_vector_elt_v4i32_s_s_v
     ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr4
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $sgpr4
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY3:%[0-9]+]]:vgpr(<4 x s32>) = COPY [[COPY]](<4 x s32>)
     ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY3]](<4 x s32>)
     ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
     ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
-    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY4]], [[UV]]
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY1]], [[UV]]
     ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
     ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
-    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY5]], [[UV1]]
+    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY1]], [[UV1]]
     ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
     ; CHECK: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C2]]
-    ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY6]], [[UV2]]
+    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY1]], [[UV2]]
     ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 3
     ; CHECK: [[ICMP3:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C3]]
-    ; CHECK: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY7]], [[UV3]]
+    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY1]], [[UV3]]
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<4 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32), [[SELECT3]](s32)
     ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
     %0:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
@@ -201,25 +193,21 @@ body: |
     ; CHECK-LABEL: name: insert_vector_elt_var_v4i32_v_s_v
     ; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $sgpr4, $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
-    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr4
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $sgpr4
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
     ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
     ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
-    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY3]], [[UV]]
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY1]], [[UV]]
     ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
     ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
-    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY4]], [[UV1]]
+    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY1]], [[UV1]]
     ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
     ; CHECK: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C2]]
-    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY5]], [[UV2]]
+    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY1]], [[UV2]]
     ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 3
     ; CHECK: [[ICMP3:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C3]]
-    ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY6]], [[UV3]]
+    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY1]], [[UV3]]
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<4 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32), [[SELECT3]](s32)
     ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
     %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
@@ -401,56 +389,40 @@ body: |
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr16_sgpr17
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY3:%[0-9]+]]:vgpr(<8 x s64>) = COPY [[COPY]](<8 x s64>)
-    ; CHECK: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
     ; CHECK: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32), [[UV16:%[0-9]+]]:vgpr(s32), [[UV17:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY3]](<8 x s64>)
     ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
     ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
-    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY4]], [[UV2]]
-    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY5]], [[UV3]]
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[UV]], [[UV2]]
+    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[UV1]], [[UV3]]
     ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
     ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
-    ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY6]], [[UV4]]
-    ; CHECK: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY7]], [[UV5]]
+    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[UV]], [[UV4]]
+    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[UV1]], [[UV5]]
     ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
     ; CHECK: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C2]]
-    ; CHECK: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT4:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY8]], [[UV6]]
-    ; CHECK: [[COPY9:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT5:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY9]], [[UV7]]
+    ; CHECK: [[SELECT4:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[UV]], [[UV6]]
+    ; CHECK: [[SELECT5:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[UV1]], [[UV7]]
     ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 3
     ; CHECK: [[ICMP3:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C3]]
-    ; CHECK: [[COPY10:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT6:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY10]], [[UV8]]
-    ; CHECK: [[COPY11:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT7:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY11]], [[UV9]]
+    ; CHECK: [[SELECT6:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[UV]], [[UV8]]
+    ; CHECK: [[SELECT7:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[UV1]], [[UV9]]
     ; CHECK: [[C4:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4
     ; CHECK: [[ICMP4:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C4]]
-    ; CHECK: [[COPY12:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT8:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP4]](s1), [[COPY12]], [[UV10]]
-    ; CHECK: [[COPY13:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT9:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP4]](s1), [[COPY13]], [[UV11]]
+    ; CHECK: [[SELECT8:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP4]](s1), [[UV]], [[UV10]]
+    ; CHECK: [[SELECT9:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP4]](s1), [[UV1]], [[UV11]]
     ; CHECK: [[C5:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 5
     ; CHECK: [[ICMP5:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C5]]
-    ; CHECK: [[COPY14:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT10:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP5]](s1), [[COPY14]], [[UV12]]
-    ; CHECK: [[COPY15:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT11:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP5]](s1), [[COPY15]], [[UV13]]
+    ; CHECK: [[SELECT10:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP5]](s1), [[UV]], [[UV12]]
+    ; CHECK: [[SELECT11:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP5]](s1), [[UV1]], [[UV13]]
     ; CHECK: [[C6:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 6
     ; CHECK: [[ICMP6:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C6]]
-    ; CHECK: [[COPY16:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT12:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP6]](s1), [[COPY16]], [[UV14]]
-    ; CHECK: [[COPY17:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT13:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP6]](s1), [[COPY17]], [[UV15]]
+    ; CHECK: [[SELECT12:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP6]](s1), [[UV]], [[UV14]]
+    ; CHECK: [[SELECT13:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP6]](s1), [[UV1]], [[UV15]]
     ; CHECK: [[C7:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 7
     ; CHECK: [[ICMP7:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C7]]
-    ; CHECK: [[COPY18:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT14:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP7]](s1), [[COPY18]], [[UV16]]
-    ; CHECK: [[COPY19:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT15:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP7]](s1), [[COPY19]], [[UV17]]
+    ; CHECK: [[SELECT14:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP7]](s1), [[UV]], [[UV16]]
+    ; CHECK: [[SELECT15:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP7]](s1), [[UV1]], [[UV17]]
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<16 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32), [[SELECT3]](s32), [[SELECT4]](s32), [[SELECT5]](s32), [[SELECT6]](s32), [[SELECT7]](s32), [[SELECT8]](s32), [[SELECT9]](s32), [[SELECT10]](s32), [[SELECT11]](s32), [[SELECT12]](s32), [[SELECT13]](s32), [[SELECT14]](s32), [[SELECT15]](s32)
     ; CHECK: [[BITCAST:%[0-9]+]]:vgpr(<8 x s64>) = G_BITCAST [[BUILD_VECTOR]](<16 x s32>)
     ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BITCAST]](<8 x s64>)
@@ -564,56 +536,40 @@ body: |
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(<8 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr16
-    ; CHECK: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
     ; CHECK: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32), [[UV16:%[0-9]+]]:vgpr(s32), [[UV17:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY]](<8 x s64>)
     ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
     ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
-    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY3]], [[UV2]]
-    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY4]], [[UV3]]
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[UV]], [[UV2]]
+    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[UV1]], [[UV3]]
     ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
     ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
-    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY5]], [[UV4]]
-    ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY6]], [[UV5]]
+    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[UV]], [[UV4]]
+    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[UV1]], [[UV5]]
     ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
     ; CHECK: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C2]]
-    ; CHECK: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT4:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY7]], [[UV6]]
-    ; CHECK: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT5:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY8]], [[UV7]]
+    ; CHECK: [[SELECT4:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[UV]], [[UV6]]
+    ; CHECK: [[SELECT5:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[UV1]], [[UV7]]
     ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 3
     ; CHECK: [[ICMP3:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C3]]
-    ; CHECK: [[COPY9:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT6:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY9]], [[UV8]]
-    ; CHECK: [[COPY10:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT7:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY10]], [[UV9]]
+    ; CHECK: [[SELECT6:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[UV]], [[UV8]]
+    ; CHECK: [[SELECT7:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[UV1]], [[UV9]]
     ; CHECK: [[C4:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4
     ; CHECK: [[ICMP4:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C4]]
-    ; CHECK: [[COPY11:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT8:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP4]](s1), [[COPY11]], [[UV10]]
-    ; CHECK: [[COPY12:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT9:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP4]](s1), [[COPY12]], [[UV11]]
+    ; CHECK: [[SELECT8:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP4]](s1), [[UV]], [[UV10]]
+    ; CHECK: [[SELECT9:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP4]](s1), [[UV1]], [[UV11]]
     ; CHECK: [[C5:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 5
     ; CHECK: [[ICMP5:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C5]]
-    ; CHECK: [[COPY13:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT10:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP5]](s1), [[COPY13]], [[UV12]]
-    ; CHECK: [[COPY14:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT11:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP5]](s1), [[COPY14]], [[UV13]]
+    ; CHECK: [[SELECT10:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP5]](s1), [[UV]], [[UV12]]
+    ; CHECK: [[SELECT11:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP5]](s1), [[UV1]], [[UV13]]
     ; CHECK: [[C6:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 6
     ; CHECK: [[ICMP6:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C6]]
-    ; CHECK: [[COPY15:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT12:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP6]](s1), [[COPY15]], [[UV14]]
-    ; CHECK: [[COPY16:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT13:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP6]](s1), [[COPY16]], [[UV15]]
+    ; CHECK: [[SELECT12:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP6]](s1), [[UV]], [[UV14]]
+    ; CHECK: [[SELECT13:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP6]](s1), [[UV1]], [[UV15]]
     ; CHECK: [[C7:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 7
     ; CHECK: [[ICMP7:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C7]]
-    ; CHECK: [[COPY17:%[0-9]+]]:vgpr(s32) = COPY [[UV]](s32)
-    ; CHECK: [[SELECT14:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP7]](s1), [[COPY17]], [[UV16]]
-    ; CHECK: [[COPY18:%[0-9]+]]:vgpr(s32) = COPY [[UV1]](s32)
-    ; CHECK: [[SELECT15:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP7]](s1), [[COPY18]], [[UV17]]
+    ; CHECK: [[SELECT14:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP7]](s1), [[UV]], [[UV16]]
+    ; CHECK: [[SELECT15:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP7]](s1), [[UV1]], [[UV17]]
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<16 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32), [[SELECT3]](s32), [[SELECT4]](s32), [[SELECT5]](s32), [[SELECT6]](s32), [[SELECT7]](s32), [[SELECT8]](s32), [[SELECT9]](s32), [[SELECT10]](s32), [[SELECT11]](s32), [[SELECT12]](s32), [[SELECT13]](s32), [[SELECT14]](s32), [[SELECT15]](s32)
     ; CHECK: [[BITCAST:%[0-9]+]]:vgpr(<8 x s64>) = G_BITCAST [[BUILD_VECTOR]](<16 x s32>)
     ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BITCAST]](<8 x s64>)
@@ -746,44 +702,3 @@ body: |
   bb.1:
     $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY %3
 ...
-
----
-name:            insert_vector_elt_with_s_buffer_load
-legalized:       true
-regBankSelected: false
-selected:        false
-failedISel:      false
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4_sgpr5
-
-    ; CHECK-LABEL: name: insert_vector_elt_with_s_buffer_load
-    ; CHECK: liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4_sgpr5
-    ; CHECK: [[COPY:%[0-9]+]]:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(<2 x s32>) = COPY $sgpr4_sgpr5
-    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
-    ; CHECK: [[AMDGPU_S_BUFFER_LOAD:%[0-9]+]]:sgpr(s32) = G_AMDGPU_S_BUFFER_LOAD [[COPY]](<4 x s32>), [[C]](s32), 0 :: (dereferenceable invariant load 4)
-    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(<2 x s32>) = COPY [[COPY1]](<2 x s32>)
-    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY3]](<2 x s32>)
-    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
-    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
-    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[AMDGPU_S_BUFFER_LOAD]](s32)
-    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY4]], [[UV]]
-    ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
-    ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C2]]
-    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[AMDGPU_S_BUFFER_LOAD]](s32)
-    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY5]], [[UV1]]
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<2 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32)
-    ; CHECK: S_ENDPGM 0, implicit [[BUILD_VECTOR]](<2 x s32>)
-    %0:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-    %1:_(<2 x s32>) = COPY $sgpr4_sgpr5
-    %2:_(s32) = COPY $vgpr0
-    %3:_(s32) = G_CONSTANT i32 0
-    %4:_(s32) = G_AMDGPU_S_BUFFER_LOAD %0(<4 x s32>), %3(s32), 0 :: (dereferenceable invariant load 4)
-    %5:_(<2 x s32>) = G_INSERT_VECTOR_ELT %1, %4(s32), %2(s32)
-
-    S_ENDPGM 0, implicit %5
-
-...