[llvm] 0337017 - [AMDGPU] Use SGPR instead of SReg classes

Jay Foad via llvm-commits <llvm-commits at lists.llvm.org>
Thu Apr 23 04:07:26 PDT 2020


Author: Jay Foad
Date: 2020-04-23T11:45:22+01:00
New Revision: 0337017a9f001d8b3d5e8b0ed7192342bacaf443

URL: https://github.com/llvm/llvm-project/commit/0337017a9f001d8b3d5e8b0ed7192342bacaf443
DIFF: https://github.com/llvm/llvm-project/commit/0337017a9f001d8b3d5e8b0ed7192342bacaf443.diff

LOG: [AMDGPU] Use SGPR instead of SReg classes

Commit 12994a70cf7 did this for the 128-bit classes:

    SGPR_128 only includes the real allocatable SGPRs, and SReg_128 adds
    the additional non-allocatable TTMP registers. There's no point in
    allocating SReg_128 vregs. This shrinks the size of the classes
    regalloc needs to consider, which is usually good.

This patch extends the same treatment to all classes wider than 64 bits,
for consistency.

Differential Revision: https://reviews.llvm.org/D78622
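
For reference, the pattern being applied is the allocatable/non-allocatable
split shown below — a condensed excerpt of the resulting 256-bit class
definitions from the SIRegisterInfo.td hunk in this diff (the other widths
follow the same shape; the comments are added here for explanation):

    // Allocatable class: contains only the real SGPRs, so this is what
    // virtual registers should be created with.
    def SGPR_256 : RegisterClass<"AMDGPU", [v8i32, v8f32, v4i64], 32,
                                 (add SGPR_256Regs)> {
      let AllocationPriority = 18;
    }

    // Superclass that additionally contains the non-allocatable TTMP
    // registers. Kept so physical TTMPs still satisfy instruction operand
    // constraints, but never handed to the register allocator.
    def SReg_256 : RegisterClass<"AMDGPU", [v8i32, v8f32, v4i64], 32,
                                 (add SGPR_256, TTMP_256)> {
      // Requires 4 s_mov_b64 to copy
      let CopyCost = 4;
      let isAllocatable = 0;
    }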

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
    llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
    llvm/lib/Target/AMDGPU/SIRegisterInfo.td
    llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert-vector-elt.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.buffer.load.ll
    llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
    llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir
    llvm/test/CodeGen/AMDGPU/coalescer-subreg-join.mir
    llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir
    llvm/test/CodeGen/AMDGPU/memory_clause.mir
    llvm/test/CodeGen/AMDGPU/merge-image-load.mir
    llvm/test/CodeGen/AMDGPU/merge-image-sample.mir
    llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir
    llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 09fde4bc741a..1849b5ad03c7 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1938,7 +1938,7 @@ static int getRegClass(RegisterKind Is, unsigned RegWidth) {
       case 3: return AMDGPU::SGPR_96RegClassID;
       case 4: return AMDGPU::SGPR_128RegClassID;
       case 5: return AMDGPU::SGPR_160RegClassID;
-      case 6: return AMDGPU::SReg_192RegClassID;
+      case 6: return AMDGPU::SGPR_192RegClassID;
       case 8: return AMDGPU::SGPR_256RegClassID;
       case 16: return AMDGPU::SGPR_512RegClassID;
     }

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b32dfb665d79..4856e529c2c4 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -141,10 +141,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
   addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
   addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
 
-  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
+  addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
   addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
 
-  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
+  addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
   addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
 
   if (Subtarget->has16BitInsts()) {
@@ -10600,22 +10600,22 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
         RC = &AMDGPU::SGPR_64RegClass;
         break;
       case 96:
-        RC = &AMDGPU::SReg_96RegClass;
+        RC = &AMDGPU::SGPR_96RegClass;
         break;
       case 128:
         RC = &AMDGPU::SGPR_128RegClass;
         break;
       case 160:
-        RC = &AMDGPU::SReg_160RegClass;
+        RC = &AMDGPU::SGPR_160RegClass;
         break;
       case 192:
-        RC = &AMDGPU::SReg_192RegClass;
+        RC = &AMDGPU::SGPR_192RegClass;
         break;
       case 256:
-        RC = &AMDGPU::SReg_256RegClass;
+        RC = &AMDGPU::SGPR_256RegClass;
         break;
       case 512:
-        RC = &AMDGPU::SReg_512RegClass;
+        RC = &AMDGPU::SGPR_512RegClass;
         break;
       }
       break;

diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index a2b42fcf7021..d22eeea52754 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -1541,9 +1541,9 @@ SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI,
     case 4:
       return &AMDGPU::SGPR_128RegClass;
     case 8:
-      return &AMDGPU::SReg_256RegClass;
+      return &AMDGPU::SGPR_256RegClass;
     case 16:
-      return &AMDGPU::SReg_512RegClass;
+      return &AMDGPU::SGPR_512RegClass;
     }
   } else {
     switch (CI.Width + Paired.Width) {

diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 40252f43d033..6f668b3b720a 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1340,19 +1340,19 @@ SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
   case 64:
     return &AMDGPU::SReg_64RegClass;
   case 96:
-    return &AMDGPU::SReg_96RegClass;
+    return &AMDGPU::SGPR_96RegClass;
   case 128:
-    return &AMDGPU::SReg_128RegClass;
+    return &AMDGPU::SGPR_128RegClass;
   case 160:
-    return &AMDGPU::SReg_160RegClass;
+    return &AMDGPU::SGPR_160RegClass;
   case 192:
-    return &AMDGPU::SReg_192RegClass;
+    return &AMDGPU::SGPR_192RegClass;
   case 256:
-    return &AMDGPU::SReg_256RegClass;
+    return &AMDGPU::SGPR_256RegClass;
   case 512:
-    return &AMDGPU::SReg_512RegClass;
+    return &AMDGPU::SGPR_512RegClass;
   case 1024:
-    return &AMDGPU::SReg_1024RegClass;
+    return &AMDGPU::SGPR_1024RegClass;
   default:
     return nullptr;
   }
@@ -1455,8 +1455,6 @@ SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
   unsigned Size = getRegSizeInBits(*VRC);
   if (Size == 32)
     return &AMDGPU::SGPR_32RegClass;
-  if (Size == 128)
-    return &AMDGPU::SGPR_128RegClass;
   const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
   assert(SRC && "Invalid register class size");
   return SRC;
@@ -1472,8 +1470,6 @@ const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
   if (isSGPRClass(RC)) {
     if (Size == 32)
       RC = &AMDGPU::SGPR_32RegClass;
-    else if (Size == 128)
-      RC = &AMDGPU::SGPR_128RegClass;
     else
       RC = getSGPRClassForBitWidth(Size);
   } else if (hasAGPRs(RC)) {
@@ -1698,8 +1694,6 @@ SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
     return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
                     : &AMDGPU::SReg_64_XEXECRegClass;
   case AMDGPU::SGPRRegBankID:
-    if (Size == 128)
-      return &AMDGPU::SGPR_128RegClass;
     return getSGPRClassForBitWidth(std::max(32u, Size));
   default:
     llvm_unreachable("unknown register bank");

diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 7d51fca8c22e..f493f937430c 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -680,7 +680,6 @@ def TTMP_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, v2i64], 32,
 
 def SReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, v2i64, v2f64], 32,
                              (add SGPR_128, TTMP_128)> {
-  let AllocationPriority = 15;
   let isAllocatable = 0;
 }
 
@@ -695,14 +694,20 @@ def SGPR_160 : RegisterClass<"AMDGPU", [v5i32, v5f32], 32,
 
 def SReg_160 : RegisterClass<"AMDGPU", [v5i32, v5f32], 32,
                              (add SGPR_160)> {
-  let AllocationPriority = 16;
+  // FIXME: Should be isAllocatable = 0, but that causes all TableGen-generated
+  // subclasses of SGPR_160 to be marked unallocatable too.
 }
 
-def SReg_192 : RegisterClass<"AMDGPU", [untyped], 32, (add SGPR_192Regs)> {
+def SGPR_192 : RegisterClass<"AMDGPU", [untyped], 32, (add SGPR_192Regs)> {
   let Size = 192;
   let AllocationPriority = 17;
 }
 
+def SReg_192 : RegisterClass<"AMDGPU", [untyped], 32, (add SGPR_192)> {
+  let Size = 192;
+  let isAllocatable = 0;
+}
+
 def SGPR_256 : RegisterClass<"AMDGPU", [v8i32, v8f32, v4i64], 32, (add SGPR_256Regs)> {
   let AllocationPriority = 18;
 }
@@ -715,7 +720,7 @@ def SReg_256 : RegisterClass<"AMDGPU", [v8i32, v8f32, v4i64], 32,
                              (add SGPR_256, TTMP_256)> {
   // Requires 4 s_mov_b64 to copy
   let CopyCost = 4;
-  let AllocationPriority = 18;
+  let isAllocatable = 0;
 }
 
 def SGPR_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 32,
@@ -732,7 +737,7 @@ def SReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 32,
                              (add SGPR_512, TTMP_512)> {
   // Requires 8 s_mov_b64 to copy
   let CopyCost = 8;
-  let AllocationPriority = 19;
+  let isAllocatable = 0;
 }
 
 def VRegOrLds_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32,
@@ -748,7 +753,7 @@ def SGPR_1024 : RegisterClass<"AMDGPU", [v32i32, v32f32], 32,
 def SReg_1024 : RegisterClass<"AMDGPU", [v32i32, v32f32], 32,
                               (add SGPR_1024)> {
   let CopyCost = 16;
-  let AllocationPriority = 20;
+  let isAllocatable = 0;
 }
 
 // Register class for all vector registers (VGPRs + Interpolation Registers)

diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 983870032efb..b896896af4c3 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -1116,18 +1116,22 @@ unsigned getRegBitWidth(unsigned RCID) {
   case AMDGPU::VReg_160RegClassID:
   case AMDGPU::AReg_160RegClassID:
     return 160;
+  case AMDGPU::SGPR_192RegClassID:
   case AMDGPU::SReg_192RegClassID:
   case AMDGPU::VReg_192RegClassID:
   case AMDGPU::AReg_192RegClassID:
     return 192;
+  case AMDGPU::SGPR_256RegClassID:
   case AMDGPU::SReg_256RegClassID:
   case AMDGPU::VReg_256RegClassID:
   case AMDGPU::AReg_256RegClassID:
     return 256;
+  case AMDGPU::SGPR_512RegClassID:
   case AMDGPU::SReg_512RegClassID:
   case AMDGPU::VReg_512RegClassID:
   case AMDGPU::AReg_512RegClassID:
     return 512;
+  case AMDGPU::SGPR_1024RegClassID:
   case AMDGPU::SReg_1024RegClassID:
   case AMDGPU::VReg_1024RegClassID:
   case AMDGPU::AReg_1024RegClassID:

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
index 938b7af5b972..d8f2fad8f938 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
@@ -99,7 +99,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
     ; GCN: $sgpr0_sgpr1_sgpr2 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x s16>) = COPY $sgpr0
     %1:sgpr(<2 x s16>) = COPY $sgpr1
@@ -233,7 +233,7 @@ body: |
     ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
     ; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
     ; GCN: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_160 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3, [[COPY4]], %subreg.sub4
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_160 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3, [[COPY4]], %subreg.sub4
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x s16>) = COPY $sgpr0
     %1:sgpr(<2 x s16>) = COPY $sgpr1
@@ -283,7 +283,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_192 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_192 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5
     ; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
     %0:sgpr(<4 x s16>) = COPY $sgpr0_sgpr1
     %1:sgpr(<4 x s16>) = COPY $sgpr2_sgpr3
@@ -328,7 +328,7 @@ body: |
     ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
     ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<4 x s16>) = COPY $sgpr0_sgpr1
     %1:sgpr(<4 x s16>) = COPY $sgpr2_sgpr3
@@ -350,7 +350,7 @@ body: |
     ; GCN-LABEL: name: test_concat_vectors_s_v12s16_s_v8s16_s_v8s16
     ; GCN: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     ; GCN: [[COPY1:%[0-9]+]]:sgpr_128 = COPY $sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<8 x s16>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(<8 x s16>) = COPY $sgpr4_sgpr5_sgpr6_sgpr7
@@ -368,9 +368,9 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
 
     ; GCN-LABEL: name: test_concat_vectors_s_v32s16_s_v12s16_s_v12s16
-    ; GCN: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[COPY1]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[COPY1]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<16 x s16>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7,
     %1:sgpr(<16 x s16>) = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
@@ -396,7 +396,7 @@ body: |
     ; GCN: [[COPY5:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GCN: [[COPY6:%[0-9]+]]:sreg_64 = COPY $sgpr12_sgpr13
     ; GCN: [[COPY7:%[0-9]+]]:sreg_64 = COPY $sgpr14_sgpr15
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7, [[COPY4]], %subreg.sub8_sub9, [[COPY5]], %subreg.sub10_sub11, [[COPY6]], %subreg.sub12_sub13, [[COPY7]], %subreg.sub14_sub15
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7, [[COPY4]], %subreg.sub8_sub9, [[COPY5]], %subreg.sub10_sub11, [[COPY6]], %subreg.sub12_sub13, [[COPY7]], %subreg.sub14_sub15
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<4 x s16>) = COPY $sgpr0_sgpr1
     %1:sgpr(<4 x s16>) = COPY $sgpr2_sgpr3
@@ -501,7 +501,7 @@ body: |
     ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
     ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x s32>) = COPY $sgpr0_sgpr1
     %1:sgpr(<2 x s32>) = COPY $sgpr2_sgpr3
@@ -524,7 +524,7 @@ body: |
     ; GCN-LABEL: name: test_concat_vectors_s_v8s32_s_v4s32_s_v4s32
     ; GCN: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     ; GCN: [[COPY1:%[0-9]+]]:sgpr_128 = COPY $sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(<4 x s32>) = COPY $sgpr4_sgpr5_sgpr6_sgpr7
@@ -542,9 +542,9 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
 
     ; GCN-LABEL: name: test_concat_vectors_s_v16s32_s_v8s32_s_v8s32
-    ; GCN: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[COPY1]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[COPY1]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<8 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7,
     %1:sgpr(<8 x s32>) = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
@@ -594,9 +594,9 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
 
     ; GCN-LABEL: name: test_concat_vectors_s_v32s32_s_v16s32_s_v16s32
-    ; GCN: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_512 = COPY $sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_1024 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, [[COPY1]], %subreg.sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_512 = COPY $sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_1024 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, [[COPY1]], %subreg.sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<16 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     %1:sgpr(<16 x s32>) = COPY $sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
@@ -616,7 +616,7 @@ body: |
     ; GCN-LABEL: name: test_concat_vectors_s_v4s64_s_v2s64_s_v2s64
     ; GCN: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     ; GCN: [[COPY1:%[0-9]+]]:sgpr_128 = COPY $sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(<2 x s64>) = COPY $sgpr4_sgpr5_sgpr6_sgpr7
@@ -654,9 +654,9 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
 
     ; GCN-LABEL: name: test_concat_vectors_s_v8s64_s_v4s64_s_v4s64
-    ; GCN: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[COPY1]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[COPY1]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<4 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7,
     %1:sgpr(<4 x s64>) = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
@@ -678,7 +678,7 @@ body: |
     ; GCN: [[COPY1:%[0-9]+]]:sgpr_128 = COPY $sgpr4_sgpr5_sgpr6_sgpr7
     ; GCN: [[COPY2:%[0-9]+]]:sgpr_128 = COPY $sgpr8_sgpr9_sgpr10_sgpr11
     ; GCN: [[COPY3:%[0-9]+]]:sgpr_128 = COPY $sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7, [[COPY2]], %subreg.sub8_sub9_sub10_sub11, [[COPY3]], %subreg.sub12_sub13_sub14_sub15
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7, [[COPY2]], %subreg.sub8_sub9_sub10_sub11, [[COPY3]], %subreg.sub12_sub13_sub14_sub15
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(<2 x s64>) = COPY $sgpr4_sgpr5_sgpr6_sgpr7
@@ -700,7 +700,7 @@ body: |
     ; GCN-LABEL: name: test_concat_vectors_s_v4p1_s_v2p1_s_v2p1
     ; GCN: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     ; GCN: [[COPY1:%[0-9]+]]:sgpr_128 = COPY $sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x p1>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(<2 x p1>) = COPY $sgpr4_sgpr5_sgpr6_sgpr7
@@ -742,7 +742,7 @@ body: |
     ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
     ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x p3>) = COPY $sgpr0_sgpr1
     %1:sgpr(<2 x p3>) = COPY $sgpr2_sgpr3

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir
index bd349646e81a..f437bf9de987 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir
@@ -41,13 +41,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2, $sgpr3
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s32_v3s32
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_96 = COPY $sgpr0_sgpr1_sgpr2
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_96 = COPY $sgpr0_sgpr1_sgpr2
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B32_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v3s32
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_96 = COPY $sgpr0_sgpr1_sgpr2
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_96 = COPY $sgpr0_sgpr1_sgpr2
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
@@ -95,13 +95,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s32_v8s32
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B32_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v8s32
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
@@ -122,13 +122,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s32_v16s32
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B32_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v16s32
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
@@ -149,13 +149,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31, $sgpr40
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s32_v32s32
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr40
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B32_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v32s32
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr40
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
@@ -203,13 +203,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s64_v4s64
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v4s64
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
@@ -230,13 +230,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s64_v8s64
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v8s64
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
@@ -257,13 +257,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31, $sgpr40
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s64_v16s64
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr40
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v16s64
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr40
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
@@ -284,13 +284,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s32_v8s32_idx_offset_1
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub1, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B32_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v8s32_idx_offset_1
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub1, implicit $m0, implicit [[COPY]]
@@ -313,7 +313,7 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s32_v8s32_idx_offset_m1
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4294967295
     ; MOVREL: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
@@ -321,7 +321,7 @@ body: |
     ; MOVREL: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B32_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v8s32_idx_offset_m1
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4294967295
     ; GPRIDX: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
@@ -346,13 +346,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s32_v8s32_idx_offset_7
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub7, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B32_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v8s32_idx_offset_7
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub7, implicit $m0, implicit [[COPY]]
@@ -375,7 +375,7 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s32_v8s32_idx_offset_8
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
     ; MOVREL: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
@@ -383,7 +383,7 @@ body: |
     ; MOVREL: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B32_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v8s32_idx_offset_8
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
     ; GPRIDX: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
@@ -408,13 +408,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s64_v8s64_idx_offset_1
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub2_sub3, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v8s64_idx_offset_1
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub2_sub3, implicit $m0, implicit [[COPY]]
@@ -437,13 +437,13 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s64_v8s64_idx_offset_2
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: $m0 = COPY [[COPY1]]
     ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub4_sub5, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v8s64_idx_offset_2
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: $m0 = COPY [[COPY1]]
     ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub4_sub5, implicit $m0, implicit [[COPY]]
@@ -466,7 +466,7 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s64_v8s64_idx_offset_m1
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4294967295
     ; MOVREL: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
@@ -474,7 +474,7 @@ body: |
     ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
     ; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v8s64_idx_offset_m1
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4294967295
     ; GPRIDX: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
index 0d8739b957a5..795ebc6a1a9a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
@@ -8,7 +8,7 @@ regBankSelected: true
 body: |
   bb.0:
     ; CHECK-LABEL: name: extract512
-    ; CHECK: [[DEF:%[0-9]+]]:sreg_512 = IMPLICIT_DEF
+    ; CHECK: [[DEF:%[0-9]+]]:sgpr_512 = IMPLICIT_DEF
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
     ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
     ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub2
@@ -86,7 +86,7 @@ regBankSelected: true
 body: |
   bb.0:
     ; CHECK-LABEL: name: extract_s_s32_s1024
-    ; CHECK: [[DEF:%[0-9]+]]:sreg_1024 = IMPLICIT_DEF
+    ; CHECK: [[DEF:%[0-9]+]]:sgpr_1024 = IMPLICIT_DEF
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
     ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
     ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub2
@@ -189,8 +189,8 @@ body: |
     ; CHECK-LABEL: name: extract_sgpr_s96_from_s128
     ; CHECK: [[COPY:%[0-9]+]]:sgpr_128_with_sub1_sub2_sub3 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr_128_with_sub0_sub1_sub2 = COPY [[COPY]]
-    ; CHECK: [[COPY2:%[0-9]+]]:sreg_96 = COPY [[COPY1]].sub0_sub1_sub2
-    ; CHECK: [[COPY3:%[0-9]+]]:sreg_96 = COPY [[COPY]].sub1_sub2_sub3
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr_96 = COPY [[COPY1]].sub0_sub1_sub2
+    ; CHECK: [[COPY3:%[0-9]+]]:sgpr_96 = COPY [[COPY]].sub1_sub2_sub3
     ; CHECK: S_ENDPGM 0, implicit [[COPY2]], implicit [[COPY3]]
     %0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(s96) = G_EXTRACT %0, 0
@@ -210,8 +210,8 @@ body: |
     ; CHECK-LABEL: name: extract_sgpr_v3s32_from_v4s32
     ; CHECK: [[COPY:%[0-9]+]]:sgpr_128_with_sub1_sub2_sub3 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr_128_with_sub0_sub1_sub2 = COPY [[COPY]]
-    ; CHECK: [[COPY2:%[0-9]+]]:sreg_96 = COPY [[COPY1]].sub0_sub1_sub2
-    ; CHECK: [[COPY3:%[0-9]+]]:sreg_96 = COPY [[COPY]].sub1_sub2_sub3
+    ; CHECK: [[COPY2:%[0-9]+]]:sgpr_96 = COPY [[COPY1]].sub0_sub1_sub2
+    ; CHECK: [[COPY3:%[0-9]+]]:sgpr_96 = COPY [[COPY]].sub1_sub2_sub3
     ; CHECK: S_ENDPGM 0, implicit [[COPY2]], implicit [[COPY3]]
     %0:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(<3 x s32>) = G_EXTRACT %0, 0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
index 57ff1d89e8dc..4699652fa4bb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
@@ -194,7 +194,7 @@ regBankSelected: true
 body: |
   bb.0:
     ; GCN-LABEL: name: implicit_def_s1024_sgpr
-    ; GCN: [[DEF:%[0-9]+]]:sreg_1024 = IMPLICIT_DEF
+    ; GCN: [[DEF:%[0-9]+]]:sgpr_1024 = IMPLICIT_DEF
     ; GCN: S_ENDPGM 0, implicit [[DEF]]
     %0:sgpr(s1024) = G_IMPLICIT_DEF
     S_ENDPGM 0, implicit %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert-vector-elt.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert-vector-elt.mir
index a64b2a30c65c..ee3e76d89380 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert-vector-elt.mir
@@ -42,18 +42,18 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2, $sgpr3, $sgpr4
 
     ; MOVREL-LABEL: name: insert_vector_elt_s_s32_v3s32
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_96 = COPY $sgpr0_sgpr1_sgpr2
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_96 = COPY $sgpr0_sgpr1_sgpr2
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
     ; MOVREL: $m0 = COPY [[COPY2]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V3_:%[0-9]+]]:sreg_96 = S_INDIRECT_REG_WRITE_B32_V3 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V3_:%[0-9]+]]:sgpr_96 = S_INDIRECT_REG_WRITE_B32_V3 [[COPY]], [[COPY1]], 3, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V3_]]
     ; GPRIDX-LABEL: name: insert_vector_elt_s_s32_v3s32
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_96 = COPY $sgpr0_sgpr1_sgpr2
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_96 = COPY $sgpr0_sgpr1_sgpr2
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
     ; GPRIDX: $m0 = COPY [[COPY2]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V3_:%[0-9]+]]:sreg_96 = S_INDIRECT_REG_WRITE_B32_V3 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V3_:%[0-9]+]]:sgpr_96 = S_INDIRECT_REG_WRITE_B32_V3 [[COPY]], [[COPY1]], 3, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V3_]]
     %0:sgpr(<3 x s32>) = COPY $sgpr0_sgpr1_sgpr2
     %1:sgpr(s32) = COPY $sgpr3
@@ -102,18 +102,18 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4, $sgpr5, $sgpr6
 
     ; MOVREL-LABEL: name: insert_vector_elt_s_s32_v5s32
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_160 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_160 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr5
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr6
     ; MOVREL: $m0 = COPY [[COPY2]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V5_:%[0-9]+]]:sreg_160 = S_INDIRECT_REG_WRITE_B32_V5 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V5_:%[0-9]+]]:sgpr_160 = S_INDIRECT_REG_WRITE_B32_V5 [[COPY]], [[COPY1]], 3, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V5_]]
     ; GPRIDX-LABEL: name: insert_vector_elt_s_s32_v5s32
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_160 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_160 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr5
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr6
     ; GPRIDX: $m0 = COPY [[COPY2]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V5_:%[0-9]+]]:sreg_160 = S_INDIRECT_REG_WRITE_B32_V5 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V5_:%[0-9]+]]:sgpr_160 = S_INDIRECT_REG_WRITE_B32_V5 [[COPY]], [[COPY1]], 3, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V5_]]
     %0:sgpr(<5 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
     %1:sgpr(s32) = COPY $sgpr5
@@ -132,18 +132,18 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8, $sgpr9
 
     ; MOVREL-LABEL: name: insert_vector_elt_s_s32_v8s32
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr9
     ; MOVREL: $m0 = COPY [[COPY2]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sreg_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sgpr_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 3, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V8_]]
     ; GPRIDX-LABEL: name: insert_vector_elt_s_s32_v8s32
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr9
     ; GPRIDX: $m0 = COPY [[COPY2]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sreg_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sgpr_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 3, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V8_]]
     %0:sgpr(<8 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     %1:sgpr(s32) = COPY $sgpr8
@@ -162,18 +162,18 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15, $sgpr16, $sgpr17
 
     ; MOVREL-LABEL: name: insert_vector_elt_s_s32_v16s32
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr16
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr17
     ; MOVREL: $m0 = COPY [[COPY2]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V16_:%[0-9]+]]:sreg_512 = S_INDIRECT_REG_WRITE_B32_V16 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V16_:%[0-9]+]]:sgpr_512 = S_INDIRECT_REG_WRITE_B32_V16 [[COPY]], [[COPY1]], 3, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V16_]]
     ; GPRIDX-LABEL: name: insert_vector_elt_s_s32_v16s32
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr16
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr17
     ; GPRIDX: $m0 = COPY [[COPY2]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V16_:%[0-9]+]]:sreg_512 = S_INDIRECT_REG_WRITE_B32_V16 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V16_:%[0-9]+]]:sgpr_512 = S_INDIRECT_REG_WRITE_B32_V16 [[COPY]], [[COPY1]], 3, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V16_]]
     %0:sgpr(<16 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     %1:sgpr(s32) = COPY $sgpr16
@@ -192,18 +192,18 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31, $sgpr40, $sgpr41
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s32_v32s32
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr40
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr41
     ; MOVREL: $m0 = COPY [[COPY2]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V32_:%[0-9]+]]:sreg_1024 = S_INDIRECT_REG_WRITE_B32_V32 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V32_:%[0-9]+]]:sgpr_1024 = S_INDIRECT_REG_WRITE_B32_V32 [[COPY]], [[COPY1]], 3, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V32_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v32s32
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr40
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr41
     ; GPRIDX: $m0 = COPY [[COPY2]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V32_:%[0-9]+]]:sreg_1024 = S_INDIRECT_REG_WRITE_B32_V32 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V32_:%[0-9]+]]:sgpr_1024 = S_INDIRECT_REG_WRITE_B32_V32 [[COPY]], [[COPY1]], 3, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V32_]]
     %0:sgpr(<32 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     %1:sgpr(s32) = COPY $sgpr40
@@ -252,18 +252,18 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10
 
     ; MOVREL-LABEL: name: insert_vector_elt_s_s64_v4s64
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr8_sgpr9
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr10
     ; MOVREL: $m0 = COPY [[COPY2]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B64_V4_:%[0-9]+]]:sreg_256 = S_INDIRECT_REG_WRITE_B64_V4 [[COPY]], [[COPY1]], 4, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B64_V4_:%[0-9]+]]:sgpr_256 = S_INDIRECT_REG_WRITE_B64_V4 [[COPY]], [[COPY1]], 4, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B64_V4_]]
     ; GPRIDX-LABEL: name: insert_vector_elt_s_s64_v4s64
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr8_sgpr9
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr10
     ; GPRIDX: $m0 = COPY [[COPY2]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B64_V4_:%[0-9]+]]:sreg_256 = S_INDIRECT_REG_WRITE_B64_V4 [[COPY]], [[COPY1]], 4, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B64_V4_:%[0-9]+]]:sgpr_256 = S_INDIRECT_REG_WRITE_B64_V4 [[COPY]], [[COPY1]], 4, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B64_V4_]]
     %0:sgpr(<4 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     %1:sgpr(s64) = COPY $sgpr8_sgpr9
@@ -282,18 +282,18 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15, $sgpr16_sgpr17, $sgpr18
 
     ; MOVREL-LABEL: name: insert_vector_elt_s_s64_v8s64
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr16_sgpr17
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr18
     ; MOVREL: $m0 = COPY [[COPY2]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B64_V8_:%[0-9]+]]:sreg_512 = S_INDIRECT_REG_WRITE_B64_V8 [[COPY]], [[COPY1]], 4, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B64_V8_:%[0-9]+]]:sgpr_512 = S_INDIRECT_REG_WRITE_B64_V8 [[COPY]], [[COPY1]], 4, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B64_V8_]]
     ; GPRIDX-LABEL: name: insert_vector_elt_s_s64_v8s64
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr16_sgpr17
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr18
     ; GPRIDX: $m0 = COPY [[COPY2]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B64_V8_:%[0-9]+]]:sreg_512 = S_INDIRECT_REG_WRITE_B64_V8 [[COPY]], [[COPY1]], 4, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B64_V8_:%[0-9]+]]:sgpr_512 = S_INDIRECT_REG_WRITE_B64_V8 [[COPY]], [[COPY1]], 4, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B64_V8_]]
     %0:sgpr(<8 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     %1:sgpr(s64) = COPY $sgpr16_sgpr17
@@ -312,18 +312,18 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31, $sgpr40_sgpr41, $sgpr42
 
     ; MOVREL-LABEL: name: extract_vector_elt_s_s64_v16s64
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr40_sgpr41
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr42
     ; MOVREL: $m0 = COPY [[COPY2]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B64_V16_:%[0-9]+]]:sreg_1024 = S_INDIRECT_REG_WRITE_B64_V16 [[COPY]], [[COPY1]], 4, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B64_V16_:%[0-9]+]]:sgpr_1024 = S_INDIRECT_REG_WRITE_B64_V16 [[COPY]], [[COPY1]], 4, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B64_V16_]]
     ; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v16s64
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr40_sgpr41
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr42
     ; GPRIDX: $m0 = COPY [[COPY2]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B64_V16_:%[0-9]+]]:sreg_1024 = S_INDIRECT_REG_WRITE_B64_V16 [[COPY]], [[COPY1]], 4, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B64_V16_:%[0-9]+]]:sgpr_1024 = S_INDIRECT_REG_WRITE_B64_V16 [[COPY]], [[COPY1]], 4, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B64_V16_]]
     %0:sgpr(<16 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
     %1:sgpr(s64) = COPY $sgpr40_sgpr41
@@ -567,18 +567,18 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8, $sgpr9
 
     ; MOVREL-LABEL: name: insert_vector_elt_s_s32_v8s32_add_1
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr9
     ; MOVREL: $m0 = COPY [[COPY2]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sreg_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 10, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sgpr_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 10, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V8_]]
     ; GPRIDX-LABEL: name: insert_vector_elt_s_s32_v8s32_add_1
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr9
     ; GPRIDX: $m0 = COPY [[COPY2]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sreg_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 10, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sgpr_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 10, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V8_]]
     %0:sgpr(<8 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     %1:sgpr(s32) = COPY $sgpr8
@@ -599,22 +599,22 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8, $sgpr9
 
     ; MOVREL-LABEL: name: insert_vector_elt_s_s32_v8s32_add_8
-    ; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; MOVREL: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; MOVREL: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr9
     ; MOVREL: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
     ; MOVREL: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
     ; MOVREL: $m0 = COPY [[S_ADD_I32_]]
-    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sreg_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; MOVREL: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sgpr_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 3, implicit $m0
     ; MOVREL: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V8_]]
     ; GPRIDX-LABEL: name: insert_vector_elt_s_s32_v8s32_add_8
-    ; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GPRIDX: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
     ; GPRIDX: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr9
     ; GPRIDX: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
     ; GPRIDX: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
     ; GPRIDX: $m0 = COPY [[S_ADD_I32_]]
-    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sreg_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 3, implicit $m0
+    ; GPRIDX: [[S_INDIRECT_REG_WRITE_B32_V8_:%[0-9]+]]:sgpr_256 = S_INDIRECT_REG_WRITE_B32_V8 [[COPY]], [[COPY1]], 3, implicit $m0
     ; GPRIDX: S_ENDPGM 0, implicit [[S_INDIRECT_REG_WRITE_B32_V8_]]
     %0:sgpr(<8 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     %1:sgpr(s32) = COPY $sgpr8

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
index c6f67fbb9414..d988a5e0195e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
@@ -10,24 +10,24 @@ regBankSelected: true
 body: |
   bb.0:
     ; CHECK-LABEL: name: insert_s512_s32
-    ; CHECK: [[DEF:%[0-9]+]]:sreg_512 = IMPLICIT_DEF
+    ; CHECK: [[DEF:%[0-9]+]]:sgpr_512 = IMPLICIT_DEF
     ; CHECK: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[DEF]], [[DEF1]], %subreg.sub0
-    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG]], [[DEF1]], %subreg.sub1
-    ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG1]], [[DEF1]], %subreg.sub2
-    ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG2]], [[DEF1]], %subreg.sub3
-    ; CHECK: [[INSERT_SUBREG4:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG3]], [[DEF1]], %subreg.sub4
-    ; CHECK: [[INSERT_SUBREG5:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG4]], [[DEF1]], %subreg.sub5
-    ; CHECK: [[INSERT_SUBREG6:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG5]], [[DEF1]], %subreg.sub6
-    ; CHECK: [[INSERT_SUBREG7:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG6]], [[DEF1]], %subreg.sub7
-    ; CHECK: [[INSERT_SUBREG8:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG7]], [[DEF1]], %subreg.sub8
-    ; CHECK: [[INSERT_SUBREG9:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG8]], [[DEF1]], %subreg.sub9
-    ; CHECK: [[INSERT_SUBREG10:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG9]], [[DEF1]], %subreg.sub10
-    ; CHECK: [[INSERT_SUBREG11:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG10]], [[DEF1]], %subreg.sub11
-    ; CHECK: [[INSERT_SUBREG12:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG11]], [[DEF1]], %subreg.sub12
-    ; CHECK: [[INSERT_SUBREG13:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG12]], [[DEF1]], %subreg.sub13
-    ; CHECK: [[INSERT_SUBREG14:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG13]], [[DEF1]], %subreg.sub14
-    ; CHECK: [[INSERT_SUBREG15:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG14]], [[DEF1]], %subreg.sub15
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[DEF]], [[DEF1]], %subreg.sub0
+    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG]], [[DEF1]], %subreg.sub1
+    ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG1]], [[DEF1]], %subreg.sub2
+    ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG2]], [[DEF1]], %subreg.sub3
+    ; CHECK: [[INSERT_SUBREG4:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG3]], [[DEF1]], %subreg.sub4
+    ; CHECK: [[INSERT_SUBREG5:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG4]], [[DEF1]], %subreg.sub5
+    ; CHECK: [[INSERT_SUBREG6:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG5]], [[DEF1]], %subreg.sub6
+    ; CHECK: [[INSERT_SUBREG7:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG6]], [[DEF1]], %subreg.sub7
+    ; CHECK: [[INSERT_SUBREG8:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG7]], [[DEF1]], %subreg.sub8
+    ; CHECK: [[INSERT_SUBREG9:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG8]], [[DEF1]], %subreg.sub9
+    ; CHECK: [[INSERT_SUBREG10:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG9]], [[DEF1]], %subreg.sub10
+    ; CHECK: [[INSERT_SUBREG11:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG10]], [[DEF1]], %subreg.sub11
+    ; CHECK: [[INSERT_SUBREG12:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG11]], [[DEF1]], %subreg.sub12
+    ; CHECK: [[INSERT_SUBREG13:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG12]], [[DEF1]], %subreg.sub13
+    ; CHECK: [[INSERT_SUBREG14:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG13]], [[DEF1]], %subreg.sub14
+    ; CHECK: [[INSERT_SUBREG15:%[0-9]+]]:sgpr_512 = INSERT_SUBREG [[INSERT_SUBREG14]], [[DEF1]], %subreg.sub15
     ; CHECK: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[INSERT_SUBREG15]]
     ; CHECK: SI_RETURN_TO_EPILOG $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     %0:sgpr(s512) = G_IMPLICIT_DEF
@@ -218,7 +218,7 @@ body: |
     ; CHECK-LABEL: name: insert_s_s96_s_s64_0
     ; CHECK: [[COPY:%[0-9]+]]:sgpr_96_with_sub0_sub1 = COPY $sgpr0_sgpr1_sgpr2
     ; CHECK: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_96 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0_sub1
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_96 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0_sub1
     ; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
     %0:sgpr(s96) = COPY $sgpr0_sgpr1_sgpr2
     %1:sgpr(s64) = COPY $sgpr4_sgpr5
@@ -238,7 +238,7 @@ body: |
     ; CHECK-LABEL: name: insert_s_s96_s_s64_32
     ; CHECK: [[COPY:%[0-9]+]]:sgpr_96_with_sub1_sub2 = COPY $sgpr0_sgpr1_sgpr2
     ; CHECK: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_96 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1_sub2
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_96 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1_sub2
     ; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
     %0:sgpr(s96) = COPY $sgpr0_sgpr1_sgpr2
     %1:sgpr(s64) = COPY $sgpr4_sgpr5
@@ -331,9 +331,9 @@ body: |
   bb.0:
     liveins:  $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9
     ; CHECK-LABEL: name: insert_s_s256_s_s64_128
-    ; CHECK: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; CHECK: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_256 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub4_sub5
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_256 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub4_sub5
     ; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
     %0:sgpr(s256) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     %1:sgpr(s64) = COPY $sgpr4_sgpr5
@@ -367,7 +367,7 @@ body: |
     liveins:  $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr6_sgpr7_sgpr8
     ; CHECK-LABEL: name: insert_s_s128_s_s96_0
     ; CHECK: [[COPY:%[0-9]+]]:sgpr_128_with_sub0_sub1_sub2 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-    ; CHECK: [[COPY1:%[0-9]+]]:sreg_96 = COPY $sgpr6_sgpr7_sgpr8
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr_96 = COPY $sgpr6_sgpr7_sgpr8
     ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_128 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0_sub1_sub2
     ; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
     %0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
@@ -387,7 +387,7 @@ body: |
     liveins:  $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr6_sgpr7_sgpr8
     ; CHECK-LABEL: name: insert_s_s128_s_s96_32
     ; CHECK: [[COPY:%[0-9]+]]:sgpr_128_with_sub1_sub2_sub3 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-    ; CHECK: [[COPY1:%[0-9]+]]:sreg_96 = COPY $sgpr6_sgpr7_sgpr8
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr_96 = COPY $sgpr6_sgpr7_sgpr8
     ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_128 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1_sub2_sub3
     ; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
     %0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
@@ -407,8 +407,8 @@ body: |
     liveins:  $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4, $sgpr6_sgpr7_sgpr8
     ; CHECK-LABEL: name: insert_s_s160_s_s96_0
     ; CHECK: [[COPY:%[0-9]+]]:sgpr_160_with_sub0_sub1_sub2 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
-    ; CHECK: [[COPY1:%[0-9]+]]:sreg_96 = COPY $sgpr6_sgpr7_sgpr8
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_160 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0_sub1_sub2
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr_96 = COPY $sgpr6_sgpr7_sgpr8
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_160 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0_sub1_sub2
     ; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
     %0:sgpr(s160) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
     %1:sgpr(s96) = COPY $sgpr6_sgpr7_sgpr8
@@ -427,8 +427,8 @@ body: |
     liveins:  $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4, $sgpr6_sgpr7_sgpr8
     ; CHECK-LABEL: name: insert_s_s160_s_s96_32
     ; CHECK: [[COPY:%[0-9]+]]:sgpr_160_with_sub1_sub2_sub3 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
-    ; CHECK: [[COPY1:%[0-9]+]]:sreg_96 = COPY $sgpr6_sgpr7_sgpr8
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_160 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1_sub2_sub3
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr_96 = COPY $sgpr6_sgpr7_sgpr8
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_160 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1_sub2_sub3
     ; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
     %0:sgpr(s160) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
     %1:sgpr(s96) = COPY $sgpr6_sgpr7_sgpr8
@@ -447,8 +447,8 @@ body: |
     liveins:  $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4, $sgpr6_sgpr7_sgpr8
     ; CHECK-LABEL: name: insert_s_s160_s_s96_64
     ; CHECK: [[COPY:%[0-9]+]]:sgpr_160_with_sub2_sub3_sub4 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
-    ; CHECK: [[COPY1:%[0-9]+]]:sreg_96 = COPY $sgpr6_sgpr7_sgpr8
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_160 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub2_sub3_sub4
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr_96 = COPY $sgpr6_sgpr7_sgpr8
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_160 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub2_sub3_sub4
     ; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
     %0:sgpr(s160) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4
     %1:sgpr(s96) = COPY $sgpr6_sgpr7_sgpr8
@@ -467,9 +467,9 @@ body: |
     liveins:  $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11
 
     ; CHECK-LABEL: name: insert_s_s256_s_s128_0
-    ; CHECK: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr_128 = COPY $sgpr8_sgpr9_sgpr10_sgpr11
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_256 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0_sub1_sub2_sub3
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_256 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0_sub1_sub2_sub3
     ; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
     %0:sgpr(s256) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     %1:sgpr(s128) = COPY $sgpr8_sgpr9_sgpr10_sgpr11

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
index 153b2860177e..495f6c8ca380 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
@@ -142,7 +142,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
     ; GCN: $sgpr0_sgpr1_sgpr2 = COPY [[REG_SEQUENCE]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
@@ -288,7 +288,7 @@ body: |
     ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
     ; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
     ; GCN: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_160 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3, [[COPY4]], %subreg.sub4
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_160 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3, [[COPY4]], %subreg.sub4
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4 = COPY [[REG_SEQUENCE]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
@@ -342,7 +342,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_192 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_192 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5
     ; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
@@ -391,7 +391,7 @@ body: |
     ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
     ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
@@ -415,7 +415,7 @@ body: |
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4_sgpr5_sgpr6_sgpr7
     ; GCN: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     ; GCN: [[COPY1:%[0-9]+]]:sgpr_128 = COPY $sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3, [[COPY1]], %subreg.sub4_sub5_sub6_sub7
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
     %0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(s128) = COPY $sgpr4_sgpr5_sgpr6_sgpr7
@@ -435,9 +435,9 @@ body: |
 
     ; GCN-LABEL: name: test_merge_values_s_s512_s_s256_s_s256
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[COPY1]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[COPY1]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[REG_SEQUENCE]]
     %0:sgpr(s256) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7,
     %1:sgpr(s256) = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
@@ -465,7 +465,7 @@ body: |
     ; GCN: [[COPY5:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GCN: [[COPY6:%[0-9]+]]:sreg_64 = COPY $sgpr12_sgpr13
     ; GCN: [[COPY7:%[0-9]+]]:sreg_64 = COPY $sgpr14_sgpr15
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7, [[COPY4]], %subreg.sub8_sub9, [[COPY5]], %subreg.sub10_sub11, [[COPY6]], %subreg.sub12_sub13, [[COPY7]], %subreg.sub14_sub15
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7, [[COPY4]], %subreg.sub8_sub9, [[COPY5]], %subreg.sub10_sub11, [[COPY6]], %subreg.sub12_sub13, [[COPY7]], %subreg.sub14_sub15
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[REG_SEQUENCE]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
@@ -569,11 +569,11 @@ body: |
 
     ; GCN-LABEL: name: test_merge_values_s_s1024_s_s256_s_s256_s_s256_s_s256
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
-    ; GCN: [[DEF:%[0-9]+]]:sreg_256 = IMPLICIT_DEF
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[DEF1:%[0-9]+]]:sreg_256 = IMPLICIT_DEF
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_1024 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[DEF]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, [[COPY1]], %subreg.sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23, [[DEF1]], %subreg.sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GCN: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_256 = COPY $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GCN: [[DEF1:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_1024 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7, [[DEF]], %subreg.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, [[COPY1]], %subreg.sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23, [[DEF1]], %subreg.sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
     ; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
     %0:sgpr(s256) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7,
     %1:sgpr(s256) = G_IMPLICIT_DEF
@@ -596,9 +596,9 @@ body: |
 
     ; GCN-LABEL: name: test_merge_values_s_s1024_s_s512
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15, $sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
-    ; GCN: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_512 = COPY $sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_1024 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, [[COPY1]], %subreg.sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_512 = COPY $sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_1024 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, [[COPY1]], %subreg.sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31 = COPY [[REG_SEQUENCE]]
     %0:sgpr(s512) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     %1:sgpr(s512) = COPY $sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir
index 28dd145e0079..238e1d15f0d3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir
@@ -94,7 +94,7 @@ body: |
   bb.0:
     liveins: $sgpr0_sgpr1_sgpr2
     ; GCN-LABEL: name: trunc_sgpr_s96_to_s16
-    ; GCN: [[COPY:%[0-9]+]]:sreg_96 = COPY $sgpr0_sgpr1_sgpr2
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_96 = COPY $sgpr0_sgpr1_sgpr2
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
     ; GCN: S_ENDPGM 0, implicit [[COPY1]]
     %0:sgpr(s96) = COPY $sgpr0_sgpr1_sgpr2
@@ -146,7 +146,7 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3
     ; GCN-LABEL: name: trunc_sgpr_s128_to_s96
     ; GCN: [[COPY:%[0-9]+]]:sgpr_128_with_sub0_sub1_sub2 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_96 = COPY [[COPY]].sub0_sub1_sub2
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_96 = COPY [[COPY]].sub0_sub1_sub2
     ; GCN: S_ENDPGM 0, implicit [[COPY1]]
     %0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(s96) = G_TRUNC %0
@@ -162,7 +162,7 @@ body: |
   bb.0:
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GCN-LABEL: name: trunc_sgpr_s256_to_s128
-    ; GCN: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
     ; GCN: [[COPY1:%[0-9]+]]:sgpr_128 = COPY [[COPY]].sub0_sub1_sub2_sub3
     ; GCN: S_ENDPGM 0, implicit [[COPY1]]
     %0:sgpr(s256) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
@@ -179,8 +179,8 @@ body: |
   bb.0:
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     ; GCN-LABEL: name: trunc_sgpr_s512_to_s256
-    ; GCN: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_256 = COPY [[COPY]].sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_256 = COPY [[COPY]].sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7
     ; GCN: S_ENDPGM 0, implicit [[COPY1]]
     %0:sgpr(s512) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
     %1:sgpr(s256) = G_TRUNC %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
index 901e04a9c979..7a27911288ac 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
@@ -114,7 +114,7 @@ body: |
 
     ; GCN-LABEL: name: test_unmerge_values_s_s32_s_s32_s32_s_s96
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2
-    ; GCN: [[COPY:%[0-9]+]]:sreg_96 = COPY $sgpr0_sgpr1_sgpr2
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_96 = COPY $sgpr0_sgpr1_sgpr2
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
     ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
     ; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub2
@@ -180,7 +180,7 @@ body: |
 
     ; GCN-LABEL: name: test_unmerge_values_s_s64_s_s64_s64_s_s192
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
-    ; GCN: [[DEF:%[0-9]+]]:sreg_192 = IMPLICIT_DEF
+    ; GCN: [[DEF:%[0-9]+]]:sgpr_192 = IMPLICIT_DEF
     ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY [[DEF]].sub0_sub1
     ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY [[DEF]].sub2_sub3
     ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY [[DEF]].sub4_sub5
@@ -244,11 +244,11 @@ body: |
 
     ; GCN-LABEL: name: test_unmerge_values_s_s256_s_s1024
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
-    ; GCN: [[DEF:%[0-9]+]]:sreg_1024 = IMPLICIT_DEF
-    ; GCN: [[COPY:%[0-9]+]]:sreg_256 = COPY [[DEF]].sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_256 = COPY [[DEF]].sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_256 = COPY [[DEF]].sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23
-    ; GCN: [[COPY3:%[0-9]+]]:sreg_256 = COPY [[DEF]].sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
+    ; GCN: [[DEF:%[0-9]+]]:sgpr_1024 = IMPLICIT_DEF
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_256 = COPY [[DEF]].sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_256 = COPY [[DEF]].sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
+    ; GCN: [[COPY2:%[0-9]+]]:sgpr_256 = COPY [[DEF]].sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23
+    ; GCN: [[COPY3:%[0-9]+]]:sgpr_256 = COPY [[DEF]].sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
     ; GCN: S_ENDPGM 0, implicit [[COPY]], implicit [[COPY1]], implicit [[COPY2]], implicit [[COPY3]]
     %0:sgpr(s1024) = G_IMPLICIT_DEF
     %1:sgpr(s256), %2:sgpr(s256), %3:sgpr(s256), %4:sgpr(s256) = G_UNMERGE_VALUES %0
@@ -273,9 +273,9 @@ body: |
     ; CHECK: $sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31 = COPY [[UV1]](s512)
     ; GCN-LABEL: name: test_unmerge_values_s_s512_s_s1024
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
-    ; GCN: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_512 = COPY [[COPY]].sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_512 = COPY [[COPY]].sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
+    ; GCN: [[COPY:%[0-9]+]]:sgpr_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
+    ; GCN: [[COPY1:%[0-9]+]]:sgpr_512 = COPY [[COPY]].sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
+    ; GCN: [[COPY2:%[0-9]+]]:sgpr_512 = COPY [[COPY]].sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[COPY1]]
     ; GCN: $sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31 = COPY [[COPY2]]
     %0:sgpr(s1024) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.buffer.load.ll
index 877240472b44..9c44181a888e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.buffer.load.ll
@@ -173,7 +173,7 @@ define amdgpu_ps <3 x i32> @s_buffer_load_v3i32(<4 x i32> inreg %rsrc, i32 inreg
   ; GFX6:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; GFX6:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; GFX6:   [[S_BUFFER_LOAD_DWORDX4_SGPR:%[0-9]+]]:sgpr_128_with_sub0_sub1_sub2 = S_BUFFER_LOAD_DWORDX4_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 12, align 4)
-  ; GFX6:   [[COPY5:%[0-9]+]]:sreg_96 = COPY [[S_BUFFER_LOAD_DWORDX4_SGPR]].sub0_sub1_sub2
+  ; GFX6:   [[COPY5:%[0-9]+]]:sgpr_96 = COPY [[S_BUFFER_LOAD_DWORDX4_SGPR]].sub0_sub1_sub2
   ; GFX6:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY5]].sub0
   ; GFX6:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[COPY5]].sub1
   ; GFX6:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[COPY5]].sub2
@@ -197,7 +197,7 @@ define amdgpu_ps <3 x i32> @s_buffer_load_v3i32(<4 x i32> inreg %rsrc, i32 inreg
   ; GFX7:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; GFX7:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; GFX7:   [[S_BUFFER_LOAD_DWORDX4_SGPR:%[0-9]+]]:sgpr_128_with_sub0_sub1_sub2 = S_BUFFER_LOAD_DWORDX4_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 12, align 4)
-  ; GFX7:   [[COPY5:%[0-9]+]]:sreg_96 = COPY [[S_BUFFER_LOAD_DWORDX4_SGPR]].sub0_sub1_sub2
+  ; GFX7:   [[COPY5:%[0-9]+]]:sgpr_96 = COPY [[S_BUFFER_LOAD_DWORDX4_SGPR]].sub0_sub1_sub2
   ; GFX7:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY5]].sub0
   ; GFX7:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[COPY5]].sub1
   ; GFX7:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[COPY5]].sub2
@@ -221,7 +221,7 @@ define amdgpu_ps <3 x i32> @s_buffer_load_v3i32(<4 x i32> inreg %rsrc, i32 inreg
   ; GFX8:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; GFX8:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; GFX8:   [[S_BUFFER_LOAD_DWORDX4_SGPR:%[0-9]+]]:sgpr_128_with_sub0_sub1_sub2 = S_BUFFER_LOAD_DWORDX4_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 12, align 4)
-  ; GFX8:   [[COPY5:%[0-9]+]]:sreg_96 = COPY [[S_BUFFER_LOAD_DWORDX4_SGPR]].sub0_sub1_sub2
+  ; GFX8:   [[COPY5:%[0-9]+]]:sgpr_96 = COPY [[S_BUFFER_LOAD_DWORDX4_SGPR]].sub0_sub1_sub2
   ; GFX8:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY5]].sub0
   ; GFX8:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[COPY5]].sub1
   ; GFX8:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[COPY5]].sub2
@@ -249,7 +249,7 @@ define amdgpu_ps <8 x i32> @s_buffer_load_v8i32(<4 x i32> inreg %rsrc, i32 inreg
   ; GFX6:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; GFX6:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; GFX6:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; GFX6:   [[S_BUFFER_LOAD_DWORDX8_SGPR:%[0-9]+]]:sreg_256 = S_BUFFER_LOAD_DWORDX8_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 32, align 4)
+  ; GFX6:   [[S_BUFFER_LOAD_DWORDX8_SGPR:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 32, align 4)
   ; GFX6:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX8_SGPR]].sub0
   ; GFX6:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX8_SGPR]].sub1
   ; GFX6:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX8_SGPR]].sub2
@@ -292,7 +292,7 @@ define amdgpu_ps <8 x i32> @s_buffer_load_v8i32(<4 x i32> inreg %rsrc, i32 inreg
   ; GFX7:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; GFX7:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; GFX7:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; GFX7:   [[S_BUFFER_LOAD_DWORDX8_SGPR:%[0-9]+]]:sreg_256 = S_BUFFER_LOAD_DWORDX8_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 32, align 4)
+  ; GFX7:   [[S_BUFFER_LOAD_DWORDX8_SGPR:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 32, align 4)
   ; GFX7:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX8_SGPR]].sub0
   ; GFX7:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX8_SGPR]].sub1
   ; GFX7:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX8_SGPR]].sub2
@@ -335,7 +335,7 @@ define amdgpu_ps <8 x i32> @s_buffer_load_v8i32(<4 x i32> inreg %rsrc, i32 inreg
   ; GFX8:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; GFX8:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; GFX8:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; GFX8:   [[S_BUFFER_LOAD_DWORDX8_SGPR:%[0-9]+]]:sreg_256 = S_BUFFER_LOAD_DWORDX8_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 32, align 4)
+  ; GFX8:   [[S_BUFFER_LOAD_DWORDX8_SGPR:%[0-9]+]]:sgpr_256 = S_BUFFER_LOAD_DWORDX8_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 32, align 4)
   ; GFX8:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX8_SGPR]].sub0
   ; GFX8:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX8_SGPR]].sub1
   ; GFX8:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX8_SGPR]].sub2
@@ -383,7 +383,7 @@ define amdgpu_ps <16 x i32> @s_buffer_load_v16i32(<4 x i32> inreg %rsrc, i32 inr
   ; GFX6:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; GFX6:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; GFX6:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; GFX6:   [[S_BUFFER_LOAD_DWORDX16_SGPR:%[0-9]+]]:sreg_512 = S_BUFFER_LOAD_DWORDX16_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 64, align 4)
+  ; GFX6:   [[S_BUFFER_LOAD_DWORDX16_SGPR:%[0-9]+]]:sgpr_512 = S_BUFFER_LOAD_DWORDX16_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 64, align 4)
   ; GFX6:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX16_SGPR]].sub0
   ; GFX6:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX16_SGPR]].sub1
   ; GFX6:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX16_SGPR]].sub2
@@ -458,7 +458,7 @@ define amdgpu_ps <16 x i32> @s_buffer_load_v16i32(<4 x i32> inreg %rsrc, i32 inr
   ; GFX7:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; GFX7:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; GFX7:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; GFX7:   [[S_BUFFER_LOAD_DWORDX16_SGPR:%[0-9]+]]:sreg_512 = S_BUFFER_LOAD_DWORDX16_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 64, align 4)
+  ; GFX7:   [[S_BUFFER_LOAD_DWORDX16_SGPR:%[0-9]+]]:sgpr_512 = S_BUFFER_LOAD_DWORDX16_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 64, align 4)
   ; GFX7:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX16_SGPR]].sub0
   ; GFX7:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX16_SGPR]].sub1
   ; GFX7:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX16_SGPR]].sub2
@@ -533,7 +533,7 @@ define amdgpu_ps <16 x i32> @s_buffer_load_v16i32(<4 x i32> inreg %rsrc, i32 inr
   ; GFX8:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; GFX8:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; GFX8:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; GFX8:   [[S_BUFFER_LOAD_DWORDX16_SGPR:%[0-9]+]]:sreg_512 = S_BUFFER_LOAD_DWORDX16_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 64, align 4)
+  ; GFX8:   [[S_BUFFER_LOAD_DWORDX16_SGPR:%[0-9]+]]:sgpr_512 = S_BUFFER_LOAD_DWORDX16_SGPR [[REG_SEQUENCE]], [[COPY4]], 0, 0 :: (dereferenceable invariant load 64, align 4)
   ; GFX8:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX16_SGPR]].sub0
   ; GFX8:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX16_SGPR]].sub1
   ; GFX8:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_BUFFER_LOAD_DWORDX16_SGPR]].sub2

diff --git a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
index 083e9ce67b47..96f35605b1c9 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
@@ -117,14 +117,14 @@ body:             |
   bb.15:
     undef %25.sub2:vreg_128 = COPY killed %17.sub2
     %26:sreg_32_xm0 = S_MOV_B32 0
-    undef %27.sub0:sreg_256 = COPY %26
-    %27.sub1:sreg_256 = COPY %26
-    %27.sub2:sreg_256 = COPY %26
-    %27.sub3:sreg_256 = COPY %26
-    %27.sub4:sreg_256 = COPY %26
-    %27.sub5:sreg_256 = COPY %26
-    %27.sub6:sreg_256 = COPY %26
-    %27.sub7:sreg_256 = COPY killed %26
+    undef %27.sub0:sgpr_256 = COPY %26
+    %27.sub1:sgpr_256 = COPY %26
+    %27.sub2:sgpr_256 = COPY %26
+    %27.sub3:sgpr_256 = COPY %26
+    %27.sub4:sgpr_256 = COPY %26
+    %27.sub5:sgpr_256 = COPY %26
+    %27.sub6:sgpr_256 = COPY %26
+    %27.sub7:sgpr_256 = COPY killed %26
     %28:vgpr_32 = IMAGE_LOAD_V1_V4 killed %25, killed %27, 2, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
     %29:vgpr_32 = V_ADD_F32_e32 0, killed %28, implicit $exec
     $m0 = S_MOV_B32 -1

diff --git a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir
index b2ac4e96c95f..8bcff8a99f45 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir
@@ -249,14 +249,14 @@ body:             |
     %107.sub1:vreg_64 = COPY killed %105
     $exec = S_AND_B64 $exec, killed %0, implicit-def dead $scc
     %108:sreg_32_xm0 = S_MOV_B32 0
-    undef %109.sub0:sreg_256 = COPY %108
-    %109.sub1:sreg_256 = COPY %108
-    %109.sub2:sreg_256 = COPY %108
-    %109.sub3:sreg_256 = COPY %108
-    %109.sub4:sreg_256 = COPY %108
-    %109.sub5:sreg_256 = COPY %108
-    %109.sub6:sreg_256 = COPY %108
-    %109.sub7:sreg_256 = COPY killed %108
+    undef %109.sub0:sgpr_256 = COPY %108
+    %109.sub1:sgpr_256 = COPY %108
+    %109.sub2:sgpr_256 = COPY %108
+    %109.sub3:sgpr_256 = COPY %108
+    %109.sub4:sgpr_256 = COPY %108
+    %109.sub5:sgpr_256 = COPY %108
+    %109.sub6:sgpr_256 = COPY %108
+    %109.sub7:sgpr_256 = COPY killed %108
     %110:vgpr_32 = IMAGE_SAMPLE_V1_V2 killed %107, killed %109, undef %111:sgpr_128, 8, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
     %112:vgpr_32 = V_MUL_F32_e32 0, killed %110, implicit $exec
     %113:vgpr_32 = V_MUL_F32_e32 0, killed %112, implicit $exec

diff --git a/llvm/test/CodeGen/AMDGPU/coalescer-subreg-join.mir b/llvm/test/CodeGen/AMDGPU/coalescer-subreg-join.mir
index c5d7628233e2..33cfffa58d8a 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-subreg-join.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-subreg-join.mir
@@ -11,11 +11,11 @@ registers:
   - { id: 0, class: sreg_64 }
   - { id: 1, class: vgpr_32 }
   - { id: 2, class: vgpr_32 }
-  - { id: 3, class: sreg_256 }
+  - { id: 3, class: sgpr_256 }
   - { id: 4, class: sgpr_128 }
-  - { id: 5, class: sreg_256 }
+  - { id: 5, class: sgpr_256 }
   - { id: 6, class: sgpr_128 }
-  - { id: 7, class: sreg_512 }
+  - { id: 7, class: sgpr_512 }
   - { id: 9, class: vreg_512 }
   - { id: 11, class: vreg_512 }
   - { id: 18, class: vgpr_32 }

diff --git a/llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir b/llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir
index 83b63fe23aae..a666428ded91 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir
@@ -39,14 +39,14 @@ body: |
     %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     undef %3.sub0:vreg_128 = COPY killed %0
     %3.sub2:vreg_128 = COPY killed %2
-    undef %4.sub0:sreg_256 = COPY %1
-    %4.sub1:sreg_256 = COPY %1
-    %4.sub2:sreg_256 = COPY %1
-    %4.sub3:sreg_256 = COPY %1
-    %4.sub4:sreg_256 = COPY %1
-    %4.sub5:sreg_256 = COPY %1
-    %4.sub6:sreg_256 = COPY %1
-    %4.sub7:sreg_256 = COPY killed %1
+    undef %4.sub0:sgpr_256 = COPY %1
+    %4.sub1:sgpr_256 = COPY %1
+    %4.sub2:sgpr_256 = COPY %1
+    %4.sub3:sgpr_256 = COPY %1
+    %4.sub4:sgpr_256 = COPY %1
+    %4.sub5:sgpr_256 = COPY %1
+    %4.sub6:sgpr_256 = COPY %1
+    %4.sub7:sgpr_256 = COPY killed %1
     %5:vgpr_32 = IMAGE_LOAD_V1_V4 killed %3, killed %4, 1, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
     %6:vgpr_32 = V_MAD_F32 0, killed %5, 0, 0, 0, 0, 0, 0, implicit $exec
     %7:vgpr_32 = V_RCP_F32_e32 killed %6, implicit $exec

diff --git a/llvm/test/CodeGen/AMDGPU/memory_clause.mir b/llvm/test/CodeGen/AMDGPU/memory_clause.mir
index 1a8d695c31be..9f154ed8bddb 100644
--- a/llvm/test/CodeGen/AMDGPU/memory_clause.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory_clause.mir
@@ -316,7 +316,7 @@ name:            image_clause
 tracksRegLiveness: true
 registers:
   - { id: 0, class: vreg_64 }
-  - { id: 1, class: sreg_256 }
+  - { id: 1, class: sgpr_256 }
   - { id: 2, class: sgpr_128 }
   - { id: 3, class: vreg_128 }
   - { id: 4, class: vreg_128 }
@@ -345,7 +345,7 @@ name:            mixed_clause
 tracksRegLiveness: true
 registers:
   - { id: 0, class: vreg_64 }
-  - { id: 1, class: sreg_256 }
+  - { id: 1, class: sgpr_256 }
   - { id: 2, class: sgpr_128 }
   - { id: 3, class: vreg_128 }
   - { id: 4, class: vreg_128 }

diff --git a/llvm/test/CodeGen/AMDGPU/merge-image-load.mir b/llvm/test/CodeGen/AMDGPU/merge-image-load.mir
index 5e35cf51031a..80b8e195b1ba 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-image-load.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-image-load.mir
@@ -11,11 +11,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sreg_256, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sreg_256, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sgpr_256, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sgpr_256, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 # GFX9-LABEL: name: image_load_merged_v1v3_reversed
@@ -29,11 +29,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sreg_256, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sreg_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sgpr_256, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sgpr_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 
@@ -48,11 +48,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vreg_64 = IMAGE_LOAD_V2_V4 %5:vreg_128, %3:sreg_256, 3, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
-    %7:vreg_64 = IMAGE_LOAD_V2_V4 %5:vreg_128, %3:sreg_256, 12, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
+    %6:vreg_64 = IMAGE_LOAD_V2_V4 %5:vreg_128, %3:sgpr_256, 3, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
+    %7:vreg_64 = IMAGE_LOAD_V2_V4 %5:vreg_128, %3:sgpr_256, 12, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
 ...
 ---
 
@@ -67,11 +67,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vreg_64 = IMAGE_LOAD_V2_V4 %5:vreg_128, %3:sreg_256, 12, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
-    %7:vreg_64 = IMAGE_LOAD_V2_V4 %5:vreg_128, %3:sreg_256, 3, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
+    %6:vreg_64 = IMAGE_LOAD_V2_V4 %5:vreg_128, %3:sgpr_256, 12, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
+    %7:vreg_64 = IMAGE_LOAD_V2_V4 %5:vreg_128, %3:sgpr_256, 3, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
 ...
 ---
 
@@ -86,11 +86,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sreg_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
-    %7:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sreg_256, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %6:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sgpr_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %7:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sgpr_256, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
 ...
 ---
 
@@ -105,11 +105,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sreg_256, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
-    %7:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sreg_256, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %6:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sgpr_256, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %7:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sgpr_256, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
 ...
 ---
 
@@ -122,15 +122,15 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sreg_256, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sgpr_256, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
     %7:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %8:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %9:vreg_96 = IMAGE_LOAD_V3_V4 %7:vreg_128, %3:sreg_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %9:vreg_96 = IMAGE_LOAD_V3_V4 %7:vreg_128, %3:sgpr_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
     %10:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %11:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sreg_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %11:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sgpr_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 
@@ -144,12 +144,12 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vreg_128 = COPY %2
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sreg_256, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    IMAGE_STORE_V4_V4 %4:vreg_128, %5:vreg_128, %3:sreg_256, 15, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec
-    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sreg_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sgpr_256, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    IMAGE_STORE_V4_V4 %4:vreg_128, %5:vreg_128, %3:sgpr_256, 15, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sgpr_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 
@@ -163,11 +163,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sreg_256, 4, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sreg_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sgpr_256, 4, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sgpr_256, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 
@@ -181,11 +181,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sreg_256, 4, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sreg_256, 11, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5:vreg_128, %3:sgpr_256, 4, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_LOAD_V3_V4 %5:vreg_128, %3:sgpr_256, 11, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 
@@ -199,7 +199,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@@ -218,8 +218,8 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
-    %4:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %4:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %5:vgpr_32 = COPY %2.sub3
     %6:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %7:vgpr_32 = IMAGE_LOAD_V1_V4 %6, %3, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -237,7 +237,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5, %3, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -255,7 +255,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5, %3, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -273,7 +273,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5, %3, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -291,7 +291,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5, %3, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -309,7 +309,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5, %3, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -327,7 +327,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vreg_64 = IMAGE_LOAD_V2_V4 %5, %3, 8, 0, 0, 0, 0, 1, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
@@ -345,7 +345,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vreg_64 = IMAGE_LOAD_V2_V4 %5, %3, 8, 0, 0, 0, 0, 0, 1, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
@@ -363,7 +363,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_LOAD_V1_V4 %5, %3, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -382,11 +382,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_MIP_V1_V4 %5:vreg_128, %3:sreg_256, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_LOAD_MIP_V3_V4 %5:vreg_128, %3:sreg_256, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_MIP_V1_V4 %5:vreg_128, %3:sgpr_256, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_LOAD_MIP_V3_V4 %5:vreg_128, %3:sgpr_256, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 
@@ -403,11 +403,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_MIP_PCK_V1_V4 %5:vreg_128, %3:sreg_256, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_LOAD_MIP_PCK_V3_V4 %5:vreg_128, %3:sreg_256, 14, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_MIP_PCK_V1_V4 %5:vreg_128, %3:sgpr_256, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_LOAD_MIP_PCK_V3_V4 %5:vreg_128, %3:sgpr_256, 14, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 
@@ -424,11 +424,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_MIP_PCK_SGN_V1_V4 %5:vreg_128, %3:sreg_256, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_LOAD_MIP_PCK_SGN_V3_V4 %5:vreg_128, %3:sreg_256, 14, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_MIP_PCK_SGN_V1_V4 %5:vreg_128, %3:sgpr_256, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_LOAD_MIP_PCK_SGN_V3_V4 %5:vreg_128, %3:sgpr_256, 14, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 
@@ -443,11 +443,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_PCK_V1_V4 %5:vreg_128, %3:sreg_256, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_LOAD_PCK_V3_V4 %5:vreg_128, %3:sreg_256, 14, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_PCK_V1_V4 %5:vreg_128, %3:sgpr_256, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_LOAD_PCK_V3_V4 %5:vreg_128, %3:sgpr_256, 14, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---
 
@@ -462,10 +462,10 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_LOAD_PCK_SGN_V1_V4 %5:vreg_128, %3:sreg_256, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_LOAD_PCK_SGN_V3_V4 %5:vreg_128, %3:sreg_256, 14, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %6:vgpr_32 = IMAGE_LOAD_PCK_SGN_V1_V4 %5:vreg_128, %3:sgpr_256, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_LOAD_PCK_SGN_V3_V4 %5:vreg_128, %3:sgpr_256, 14, 0, 0, 0, 0, 0, 0, -1, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
 ...
 ---

diff --git a/llvm/test/CodeGen/AMDGPU/merge-image-sample.mir b/llvm/test/CodeGen/AMDGPU/merge-image-sample.mir
index 41e121dd2683..3d0c5932925e 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-image-sample.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-image-sample.mir
@@ -11,11 +11,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 # GFX9-LABEL: name: image_sample_l_merged_v1v3_reversed
@@ -29,11 +29,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -48,11 +48,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 3, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
-    %7:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 12, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4) 
+    %6:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 3, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
+    %7:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 12, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4) 
 ...
 ---
 
@@ -67,11 +67,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 12, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
-    %7:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 3, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4) 
+    %6:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 12, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4)
+    %7:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 3, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 8, align 16, addrspace 4) 
 ...
 ---
 
@@ -86,11 +86,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
-    %7:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4) 
+    %6:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %7:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4) 
 ...
 ---
 
@@ -105,11 +105,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
-    %7:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4) 
+    %6:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4)
+    %7:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4) 
 ...
 ---
 
@@ -122,15 +122,15 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
     %7:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %8:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %9:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %7:vreg_128, %3:sreg_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %9:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %7:vreg_128, %3:sgpr_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
     %10:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %11:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %11:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -144,12 +144,12 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vreg_128 = COPY %2
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    IMAGE_STORE_V4_V4 %4:vreg_128, %5:vreg_128, %3:sreg_256, 15, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec
-    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    IMAGE_STORE_V4_V4 %4:vreg_128, %5:vreg_128, %3:sgpr_256, 15, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -163,11 +163,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 4, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 4, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 7, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -181,11 +181,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 4, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 11, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 4, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_L_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 11, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -199,7 +199,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@@ -218,8 +218,8 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
-    %4:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %4:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %5:vgpr_32 = COPY %2.sub3
     %6:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %7:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %6, %3, %2, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -238,7 +238,7 @@ body:             |
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
     %3:sgpr_128 = COPY $sgpr92_sgpr93_sgpr94_sgpr95
-    %4:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %4:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %5:vgpr_32 = COPY %2.sub3
     %6:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %7:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %6, %4, %2, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -256,7 +256,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5, %3, %2, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -274,7 +274,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5, %3, %2, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -292,7 +292,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5, %3, %2, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -310,7 +310,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5, %3, %2, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -328,7 +328,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5, %3, %2, 8, 0, 0, 0, 0, 1, 0, -1, 0, implicit $exec :: (dereferenceable load 8, addrspace 4)
@@ -346,7 +346,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vreg_64 = IMAGE_SAMPLE_L_V2_V4 %5, %3, %2, 8, 0, 0, 0, 0, 0, 1, -1, 0, implicit $exec :: (dereferenceable load 8, addrspace 4)
@@ -364,7 +364,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5, %3, %2, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -382,7 +382,7 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
     %6:vgpr_32 = IMAGE_SAMPLE_L_V1_V4 %5, %3, %2, 8, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
@@ -404,11 +404,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -424,11 +424,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_B_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_B_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_B_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_B_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -444,11 +444,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_B_CL_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_B_CL_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_B_CL_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_B_CL_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -464,11 +464,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_B_CL_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_B_CL_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_B_CL_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_B_CL_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -484,11 +484,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_B_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_B_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_B_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_B_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -504,11 +504,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -524,11 +524,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_CD_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_CD_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_CD_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_CD_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -544,11 +544,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_CD_CL_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_CD_CL_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_CD_CL_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_CD_CL_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -564,11 +564,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_CD_CL_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_CD_CL_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_CD_CL_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_CD_CL_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -584,11 +584,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_CD_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_CD_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_CD_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_CD_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -604,11 +604,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_CL_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_CL_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_CL_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_CL_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -624,11 +624,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_CL_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_CL_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_CL_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_CL_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -644,11 +644,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_B_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_B_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_B_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_B_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -664,11 +664,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_B_CL_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_B_CL_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_B_CL_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_B_CL_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -684,11 +684,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_B_CL_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_B_CL_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_B_CL_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_B_CL_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -704,11 +704,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_B_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_B_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_B_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_B_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -724,11 +724,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_CD_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_CD_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_CD_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_CD_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -744,11 +744,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_CD_CL_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_CD_CL_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_CD_CL_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_CD_CL_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -764,11 +764,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_CD_CL_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_CD_CL_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_CD_CL_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_CD_CL_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -784,11 +784,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_CD_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_CD_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_CD_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_CD_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -804,11 +804,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_CL_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_CL_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_CL_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_CL_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -824,11 +824,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_CL_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_CL_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_CL_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_CL_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -844,11 +844,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_D_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_D_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_D_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_D_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -864,11 +864,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_D_CL_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_D_CL_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_D_CL_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_D_CL_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -884,11 +884,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_D_CL_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_D_CL_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_D_CL_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_D_CL_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -904,11 +904,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_D_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_D_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_D_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_D_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -924,11 +924,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_L_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_L_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_L_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_L_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -944,11 +944,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_LZ_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_LZ_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_LZ_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_LZ_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -964,11 +964,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_LZ_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_LZ_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_LZ_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_LZ_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -984,11 +984,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_L_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_L_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_L_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_L_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -1004,11 +1004,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_C_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_C_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_C_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_C_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -1024,11 +1024,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_D_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_D_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_D_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_D_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -1044,11 +1044,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_D_CL_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_D_CL_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_D_CL_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_D_CL_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -1064,11 +1064,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_D_CL_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_D_CL_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_D_CL_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_D_CL_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -1084,11 +1084,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_D_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_D_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_D_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_D_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -1104,11 +1104,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_LZ_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_LZ_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_LZ_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_LZ_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -1124,11 +1124,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_LZ_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_LZ_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_LZ_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_LZ_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -1144,11 +1144,11 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_L_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_L_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_L_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_L_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---
 
@@ -1164,10 +1164,10 @@ body:             |
     %0:sgpr_64 = COPY $sgpr0_sgpr1
     %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
     %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
-    %3:sreg_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
+    %3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
     %4:vgpr_32 = COPY %2.sub3
     %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
-    %6:vgpr_32 = IMAGE_SAMPLE_O_V1_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
-    %7:vreg_96 = IMAGE_SAMPLE_O_V3_V4 %5:vreg_128, %3:sreg_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
+    %6:vgpr_32 = IMAGE_SAMPLE_O_V1_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 1, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 4, addrspace 4)
+    %7:vreg_96 = IMAGE_SAMPLE_O_V3_V4 %5:vreg_128, %3:sgpr_256, %2:sgpr_128, 14, 0, 0, 0, 0, 0, 0, -1, 0, implicit $exec :: (dereferenceable load 12, align 16, addrspace 4) 
 ...
 ---

diff  --git a/llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir b/llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir
index ec774291067a..4a69057b1f10 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir
@@ -126,13 +126,13 @@ body:             |
   ; GCN-LABEL: name: exec_copy_to_subreg
   ; GCN: bb.0:
   ; GCN:   successors: %bb.1(0x80000000)
-  ; GCN:   dead undef %0.sub0:sreg_256 = COPY $exec
+  ; GCN:   dead undef %0.sub0:sgpr_256 = COPY $exec
   ; GCN:   dead %1:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, undef %2:sreg_64_xexec, implicit $exec
   ; GCN:   S_BRANCH %bb.1
   ; GCN: bb.1:
   bb.0:
 
-    undef %0.sub0:sreg_256 = COPY $exec
+    undef %0.sub0:sgpr_256 = COPY $exec
     %2:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, undef %1:sreg_64_xexec, implicit $exec
     S_BRANCH %bb.1
 

diff  --git a/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir b/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir
index 778c12b3dae0..1bac81699edd 100644
--- a/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir
+++ b/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir
@@ -89,14 +89,14 @@ body: |
     dead %8:vgpr_32 = V_MUL_F32_e32 0, %2, implicit $exec
     undef %9.sub1:vreg_64 = V_MUL_F32_e32 0, %1, implicit $exec
     undef %10.sub0:vreg_128 = V_MUL_F32_e32 0, %0, implicit $exec
-    undef %11.sub0:sreg_256 = S_MOV_B32 0
-    %11.sub1:sreg_256 = COPY %11.sub0
-    %11.sub2:sreg_256 = COPY %11.sub0
-    %11.sub3:sreg_256 = COPY %11.sub0
-    %11.sub4:sreg_256 = COPY %11.sub0
-    %11.sub5:sreg_256 = COPY %11.sub0
-    %11.sub6:sreg_256 = COPY %11.sub0
-    %11.sub7:sreg_256 = COPY %11.sub0
+    undef %11.sub0:sgpr_256 = S_MOV_B32 0
+    %11.sub1:sgpr_256 = COPY %11.sub0
+    %11.sub2:sgpr_256 = COPY %11.sub0
+    %11.sub3:sgpr_256 = COPY %11.sub0
+    %11.sub4:sgpr_256 = COPY %11.sub0
+    %11.sub5:sgpr_256 = COPY %11.sub0
+    %11.sub6:sgpr_256 = COPY %11.sub0
+    %11.sub7:sgpr_256 = COPY %11.sub0
     %12:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %9, %11, undef %13:sgpr_128, 15, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
     %14:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
     %15:vreg_128 = IMPLICIT_DEF
@@ -257,14 +257,14 @@ body: |
     successors: %bb.22(0x80000000)
     undef %60.sub1:vreg_64 = V_CVT_I32_F32_e32 %1, implicit $exec
     %60.sub0:vreg_64 = V_CVT_I32_F32_e32 %0, implicit $exec
-    undef %61.sub0:sreg_256 = S_MOV_B32 0
-    %61.sub1:sreg_256 = COPY %61.sub0
-    %61.sub2:sreg_256 = COPY %61.sub0
-    %61.sub3:sreg_256 = COPY %61.sub0
-    %61.sub4:sreg_256 = COPY %61.sub0
-    %61.sub5:sreg_256 = COPY %61.sub0
-    %61.sub6:sreg_256 = COPY %61.sub0
-    %61.sub7:sreg_256 = COPY %61.sub0
+    undef %61.sub0:sgpr_256 = S_MOV_B32 0
+    %61.sub1:sgpr_256 = COPY %61.sub0
+    %61.sub2:sgpr_256 = COPY %61.sub0
+    %61.sub3:sgpr_256 = COPY %61.sub0
+    %61.sub4:sgpr_256 = COPY %61.sub0
+    %61.sub5:sgpr_256 = COPY %61.sub0
+    %61.sub6:sgpr_256 = COPY %61.sub0
+    %61.sub7:sgpr_256 = COPY %61.sub0
     %62:vgpr_32 = V_MOV_B32_e32 1033100696, implicit $exec
     %63:vgpr_32 = V_MUL_F32_e32 1060575065, %15.sub1, implicit $exec
     %63:vgpr_32 = V_MAC_F32_e32 1046066128, %15.sub0, %63, implicit $exec


        

