[llvm] cd7650c - GlobalISel: Implement fewerElementsVector for G_SEXT_INREG

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 3 11:47:47 PST 2020


Author: Matt Arsenault
Date: 2020-02-03T11:47:33-08:00
New Revision: cd7650c186179ad156bfa0d8324d6662f2828aae

URL: https://github.com/llvm/llvm-project/commit/cd7650c186179ad156bfa0d8324d6662f2828aae
DIFF: https://github.com/llvm/llvm-project/commit/cd7650c186179ad156bfa0d8324d6662f2828aae.diff

LOG: GlobalISel: Implement fewerElementsVector for G_SEXT_INREG

Start using a new strategy based on a combination of merges and unmerges.

This allows scalarizing before lowering, which in cases like
<2 x s128> avoids producing giant illegal shifts.
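
The piece bookkeeping follows the GCD/LCM scheme used by extractGCDType and
buildLCMMergePieces below. As a standalone illustration (plain C++17, not the
LLVM API; sizes in bits), here is the arithmetic for the <3 x s16> over
<2 x s16> case exercised by the tests:

    #include <cstdio>
    #include <numeric> // std::gcd, std::lcm

    int main() {
      const int DstSize = 3 * 16;    // <3 x s16> destination
      const int NarrowSize = 2 * 16; // <2 x s16> NarrowTy pieces
      const int GCDSize = std::gcd(DstSize, NarrowSize); // 16: unmerge unit
      const int LCMSize = std::lcm(DstSize, NarrowSize); // 96: padded width
      std::printf("unmerge: %d pieces of s%d\n", DstSize / GCDSize, GCDSize);
      std::printf("pad to %d bits, process %d NarrowTy pieces, extract low %d\n",
                  LCMSize, LCMSize / NarrowSize, DstSize);
      return 0;
    }

The source is unmerged into GCD-typed pieces, padded with undef/extended
pieces up to the LCM type, processed per NarrowTy piece, and the low bits of
the remerge are extracted back into the destination.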

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
    llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
    llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/artifact-combiner-sext.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext-inreg.mir

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 22b8dbd1685d..6dcb43d6c485 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -179,9 +179,19 @@ class LegalizerHelper {
   /// \p NarrowTy is the desired result merge source type. If the source value
   /// needs to be widened to evenly cover \p DstReg, inserts high bits
   /// corresponding to the extension opcode \p PadStrategy.
-  void buildLCMMerge(Register DstReg, LLT NarrowTy, LLT GCDTy,
-                     SmallVectorImpl<Register> &VRegs,
-                     unsigned PadStrategy = TargetOpcode::G_ANYEXT);
+  ///
+  /// \p VRegs will be cleared, and the resulting \p NarrowTy register pieces
+  /// will replace it. Returns the complete LCMTy that \p VRegs will cover when
+  /// merged.
+  LLT buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
+                          SmallVectorImpl<Register> &VRegs,
+                          unsigned PadStrategy = TargetOpcode::G_ANYEXT);
+
+  /// Merge the values in \p RemergeRegs to an \p LCMTy typed value. Extract the
+  /// low bits into \p DstReg. This is intended to consume the outputs of
+  /// buildLCMMergePieces after they have been processed.
+  void buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
+                                ArrayRef<Register> RemergeRegs);
 
   /// Perform generic multiplication of values held in multiple registers.
   /// Generated instructions use only types NarrowTy and i1.
@@ -230,6 +240,9 @@ class LegalizerHelper {
   LegalizeResult
   reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
 
+  LegalizeResult fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
+                                              LLT NarrowTy);
+
   LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
                                              LLT HalfTy, LLT ShiftAmtTy);
 

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index fd9b0f31bb61..cc56d1e957a6 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -518,6 +518,11 @@ class MachineIRBuilder {
   /// \return The newly created instruction.
   MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op);
 
+  /// Build and insert \p Res = G_SEXT_INREG \p Op, ImmOp
+  MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp) {
+    return buildInstr(TargetOpcode::G_SEXT_INREG, {Res}, {Op, SrcOp(ImmOp)});
+  }
+
   /// Build and insert \p Res = G_FPEXT \p Op
   MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op,
                                  Optional<unsigned> Flags = None) {
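
For reference, G_SEXT_INREG sign-extends from bit \p ImmOp of the source; the
shl/ashr pairs in the test updates below compute exactly this. A standalone
model in plain C++ (an illustrative sketch, not the builder API):

    #include <cstdint>
    #include <cstdio>

    // Sign-extend the low `Bits` bits of a 32-bit value, matching the
    // G_SHL/G_ASHR sequence the G_SEXT_INREG lowering emits.
    int32_t sextInReg32(int32_t V, int Bits) {
      const int Shift = 32 - Bits;
      // Shift left as unsigned to avoid signed-overflow UB, then shift
      // right arithmetically (two's complement).
      return (int32_t)((uint32_t)V << Shift) >> Shift;
    }

    int main() {
      std::printf("%d\n", sextInReg32(0xFF, 8)); // -1
      std::printf("%d\n", sextInReg32(0x7F, 8)); // 127
      return 0;
    }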

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index ae66c1894efe..6a34a3111fc7 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -284,7 +284,7 @@ LLT LegalizerHelper::extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
                                     LLT NarrowTy, Register SrcReg) {
   LLT SrcTy = MRI.getType(SrcReg);
 
-  LLT GCDTy = getGCDType(getGCDType(SrcTy, NarrowTy), DstTy);
+  LLT GCDTy = getGCDType(DstTy, getGCDType(SrcTy, NarrowTy));
   if (SrcTy == GCDTy) {
     // If the source already evenly divides the result type, we don't need to do
     // anything.
@@ -298,10 +298,9 @@ LLT LegalizerHelper::extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
   return GCDTy;
 }
 
-void LegalizerHelper::buildLCMMerge(Register DstReg, LLT NarrowTy, LLT GCDTy,
-                                    SmallVectorImpl<Register> &VRegs,
-                                    unsigned PadStrategy) {
-  LLT DstTy = MRI.getType(DstReg);
+LLT LegalizerHelper::buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
+                                         SmallVectorImpl<Register> &VRegs,
+                                         unsigned PadStrategy) {
   LLT LCMTy = getLCMType(DstTy, NarrowTy);
 
   int NumParts = LCMTy.getSizeInBits() / NarrowTy.getSizeInBits();
@@ -388,12 +387,34 @@ void LegalizerHelper::buildLCMMerge(Register DstReg, LLT NarrowTy, LLT GCDTy,
       AllPadReg = Remerge[I];
   }
 
+  VRegs = std::move(Remerge);
+  return LCMTy;
+}
+
+void LegalizerHelper::buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
+                                               ArrayRef<Register> RemergeRegs) {
+  LLT DstTy = MRI.getType(DstReg);
+
   // Create the merge to the widened source, and extract the relevant bits into
   // the result.
-  if (DstTy == LCMTy)
-    MIRBuilder.buildMerge(DstReg, Remerge);
-  else
-    MIRBuilder.buildTrunc(DstReg, MIRBuilder.buildMerge(LCMTy, Remerge));
+
+  if (DstTy == LCMTy) {
+    MIRBuilder.buildMerge(DstReg, RemergeRegs);
+    return;
+  }
+
+  auto Remerge = MIRBuilder.buildMerge(LCMTy, RemergeRegs);
+  if (DstTy.isScalar() && LCMTy.isScalar()) {
+    MIRBuilder.buildTrunc(DstReg, Remerge);
+    return;
+  }
+
+  if (LCMTy.isVector()) {
+    MIRBuilder.buildExtract(DstReg, Remerge, 0);
+    return;
+  }
+
+  llvm_unreachable("unhandled case");
 }
 
 static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
@@ -3179,6 +3200,28 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
   return Legalized;
 }
 
+LegalizerHelper::LegalizeResult
+LegalizerHelper::fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
+                                              LLT NarrowTy) {
+  Register DstReg = MI.getOperand(0).getReg();
+  Register SrcReg = MI.getOperand(1).getReg();
+  int64_t Imm = MI.getOperand(2).getImm();
+
+  LLT DstTy = MRI.getType(DstReg);
+
+  SmallVector<Register, 8> Parts;
+  LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg);
+  LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts);
+
+  for (Register &R : Parts)
+    R = MIRBuilder.buildSExtInReg(NarrowTy, R, Imm).getReg(0);
+
+  buildWidenedRemergeToDst(DstReg, LCMTy, Parts);
+
+  MI.eraseFromParent();
+  return Legalized;
+}
+
 LegalizerHelper::LegalizeResult
 LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
                                      LLT NarrowTy) {
@@ -3275,6 +3318,8 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
   case G_LOAD:
   case G_STORE:
     return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
+  case G_SEXT_INREG:
+    return fewerElementsVectorSextInReg(MI, TypeIdx, NarrowTy);
   default:
     return UnableToLegalize;
   }
@@ -3888,7 +3933,9 @@ LegalizerHelper::narrowScalarExt(MachineInstr &MI, unsigned TypeIdx,
 
   SmallVector<Register, 8> Parts;
   LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg);
-  buildLCMMerge(DstReg, NarrowTy, GCDTy, Parts, MI.getOpcode());
+  LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts, MI.getOpcode());
+  buildWidenedRemergeToDst(DstReg, LCMTy, Parts);
+
   MI.eraseFromParent();
   return Legalized;
 }

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index fcf4dbc1273a..7d9f41e64ab7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -151,7 +151,8 @@ static LegalityPredicate isRegisterType(unsigned TypeIdx) {
 
 static LegalityPredicate elementTypeIs(unsigned TypeIdx, LLT Type) {
   return [=](const LegalityQuery &Query) {
-    return Query.Types[TypeIdx].getElementType() == Type;
+    const LLT QueryTy = Query.Types[TypeIdx];
+    return QueryTy.isVector() && QueryTy.getElementType() == Type;
   };
 }
 
@@ -1127,7 +1128,23 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   }
 
   // TODO: Make legal for s32, s64. s64 case needs break down in regbankselect.
-  getActionDefinitionsBuilder(G_SEXT_INREG)
+  auto &SextInReg = getActionDefinitionsBuilder(G_SEXT_INREG);
+  if (ST.hasVOP3PInsts()) {
+    SextInReg.lowerFor({{S32}, {S64}, {S16}, {V2S16}})
+      // Prefer to reduce vector widths for 16-bit vectors before lowering,
+      // so that the expansion of each piece can take advantage of packed
+      // vector shifts.
+      .fewerElementsIf(elementTypeIs(0, S16), changeTo(0, V2S16));
+  } else if (ST.has16BitInsts()) {
+    SextInReg.lowerFor({{S32}, {S64}, {S16}});
+  } else {
+    // Prefer to promote to s32 before lowering if we don't have 16-bit
+    // shifts. This avoids a lot of intermediate truncate and extend operations.
+    SextInReg.lowerFor({{S32}, {S64}});
+  }
+
+  SextInReg
+    .scalarize(0)
     .clampScalar(0, MinLegalScalarShiftTy, S64)
     .lower();
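
To sanity-check the rule ordering, here is a standalone model in plain C++
(not the LegalizeRuleSet API; the action strings are illustrative) of which
action fires first for a given subtarget and type:

    #include <cstdio>
    #include <string>

    std::string sextInRegAction(bool HasVOP3P, bool Has16Bit,
                                const std::string &Ty) {
      const bool IsVector = !Ty.empty() && Ty[0] == 'v';
      if (HasVOP3P) {
        if (Ty == "s32" || Ty == "s64" || Ty == "s16" || Ty == "v2s16")
          return "lower";
        if (IsVector && Ty.find("s16") != std::string::npos)
          return "fewerElements to v2s16"; // v2s16 pieces then lower
      } else if (Has16Bit) {
        if (Ty == "s32" || Ty == "s64" || Ty == "s16")
          return "lower";
      } else {
        if (Ty == "s16")
          return "clampScalar to s32, then lower"; // promote first
        if (Ty == "s32" || Ty == "s64")
          return "lower";
      }
      if (IsVector)
        return "scalarize, then re-query";
      return "clampScalar, then lower";
    }

    int main() {
      std::printf("gfx9 v4s16: %s\n", sextInRegAction(true, true, "v4s16").c_str());
      std::printf("gfx8 v2s16: %s\n", sextInRegAction(false, true, "v2s16").c_str());
      std::printf("gfx6 s16:   %s\n", sextInRegAction(false, false, "s16").c_str());
      return 0;
    }

This matches the test updates: gfx9 keeps v2s16 packed shifts, gfx8
scalarizes to s16 shl/ashr pairs, and gfx6 promotes to s32 before lowering.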
 

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/artifact-combiner-sext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/artifact-combiner-sext.mir
index 43bc6b00929b..6355b91d8b31 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/artifact-combiner-sext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/artifact-combiner-sext.mir
@@ -11,11 +11,11 @@ body: |
     ; CHECK-LABEL: name: test_sext_trunc_v2s32_to_v2s16_to_v2s32
     ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
     ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY [[COPY]](<2 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
     ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -37,11 +37,11 @@ body: |
     ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV]](s32)
     ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV1]](s32)
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
+    ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C]](s32)
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-    ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[COPY1]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[COPY1]](s32)
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[COPY2]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY2]](s32)
     ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[COPY3]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[ASHR]](s64), [[ASHR1]](s64)
@@ -66,24 +66,15 @@ body: |
     ; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[TRUNC]](<2 x s16>)
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
-    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[COPY1]](s32)
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]](s32)
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
-    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
-    ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[COPY4]](s32)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
-    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
-    ; CHECK: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[C]](s32)
-    ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[C]](s32)
-    ; CHECK: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[ASHR2]], [[COPY6]](s32)
-    ; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR3]](s32)
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C1]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C1]](s32)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s32)
+    ; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
     ; CHECK: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
     %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
@@ -101,13 +92,13 @@ body: |
     ; CHECK-LABEL: name: test_sext_trunc_v3s32_to_v3s16_to_v3s32
     ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
     ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32)
     ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir
index bb1592ba5ef0..289b1b152624 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir
@@ -206,11 +206,11 @@ body: |
     ; CHECK-LABEL: name: extract_vector_elt_0_v2i8_i32
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
     ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY [[DEF]](<2 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
     ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<2 x s32>), 0
@@ -250,11 +250,11 @@ body: |
     ; CHECK-LABEL: name: extract_vector_elt_0_v2i1_i32
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
     ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY [[DEF]](<2 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
     ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<2 x s32>), 0
@@ -277,11 +277,11 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
     ; CHECK: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
     ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY [[DEF]](<2 x s32>)
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C1]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C1]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C1]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
     ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[C]](s1)
@@ -306,11 +306,11 @@ body: |
     ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[COPY]](<2 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<2 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
     ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<2 x s32>), [[COPY1]](s32)
@@ -335,13 +335,13 @@ body: |
     ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
     ; CHECK: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32)
     ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[COPY1]](s32)
@@ -366,15 +366,15 @@ body: |
     ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; CHECK: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY [[COPY]](<4 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<4 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
-    ; CHECK: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[UV3]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
+    ; CHECK: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[UV3]], [[C]](s32)
     ; CHECK: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32), [[ASHR3]](s32)
     ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<4 x s32>), [[COPY1]](s32)
@@ -477,13 +477,13 @@ body: |
     ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
     ; CHECK: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32)
     ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[COPY1]](s32)
@@ -507,13 +507,13 @@ body: |
     ; CHECK-LABEL: name: extract_vector_elt_v3s16_idx0_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
     ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32)
     ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<3 x s32>), 0
@@ -537,13 +537,13 @@ body: |
     ; CHECK-LABEL: name: extract_vector_elt_v3s16_idx1_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
     ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32)
     ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<3 x s32>), 32
@@ -567,13 +567,13 @@ body: |
     ; CHECK-LABEL: name: extract_vector_elt_v3s16_idx2_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
     ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32)
     ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<3 x s32>), 64

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext-inreg.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext-inreg.mir
index 03b0343b5e14..a013d7c172de 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext-inreg.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sext-inreg.mir
@@ -770,31 +770,31 @@ body: |
 
     ; GFX9-LABEL: name: test_sext_inreg_v2s32_1
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX9: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX9: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
     ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
     ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
     ; GFX8-LABEL: name: test_sext_inreg_v2s32_1
     ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX8: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX8: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
     ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
     ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
     ; GFX6-LABEL: name: test_sext_inreg_v2s32_1
     ; GFX6: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
     ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
     ; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -818,16 +818,16 @@ body: |
     ; GFX9: $vgpr0 = COPY [[ASHR]](<2 x s16>)
     ; GFX8-LABEL: name: test_sext_inreg_v2s16_1
     ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
-    ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
     ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
     ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
-    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C1]](s32)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
     ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
-    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[C]](s16)
-    ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[C]](s16)
+    ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16)
+    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[C1]](s16)
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C1]](s16)
+    ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[C1]](s16)
     ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[ASHR]](s16), [[ASHR1]](s16)
     ; GFX8: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
     ; GFX6-LABEL: name: test_sext_inreg_v2s16_1
@@ -835,24 +835,15 @@ body: |
     ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
     ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
-    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
-    ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[COPY1]](s32)
-    ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]](s32)
-    ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
-    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
-    ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
-    ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[COPY4]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
-    ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
-    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[C]](s32)
-    ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[C]](s32)
-    ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[ASHR2]], [[COPY6]](s32)
-    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR3]](s32)
+    ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C1]](s32)
+    ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
+    ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C1]](s32)
+    ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s32)
+    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
     ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
     ; GFX6: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
     %0:_(<2 x s16>) = COPY $vgpr0
@@ -869,79 +860,56 @@ body: |
     ; GFX9-LABEL: name: test_sext_inreg_v3s16_1
     ; GFX9: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
     ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[COPY]](<4 x s16>), 0
-    ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
-    ; GFX9: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[DEF]](s16)
-    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
-    ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
-    ; GFX9: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT]](<3 x s16>), 0
-    ; GFX9: [[EXTRACT3:%[0-9]+]]:_(s16) = G_EXTRACT [[INSERT1]](<4 x s16>), 32
-    ; GFX9: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT1]](<3 x s16>), 0
-    ; GFX9: [[EXTRACT4:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT2]](<4 x s16>), 0
-    ; GFX9: [[COPY1:%[0-9]+]]:_(s16) = COPY [[C]](s16)
-    ; GFX9: [[SHL:%[0-9]+]]:_(<2 x s16>) = G_SHL [[EXTRACT2]], [[EXTRACT4]](<2 x s16>)
-    ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[EXTRACT3]], [[COPY1]](s16)
-    ; GFX9: [[DEF4:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[EXTRACT5:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF4]](<4 x s16>), 0
-    ; GFX9: [[DEF5:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF5]], [[EXTRACT5]](<3 x s16>), 0
-    ; GFX9: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT3]], [[SHL]](<2 x s16>), 0
-    ; GFX9: [[EXTRACT6:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT4]](<4 x s16>), 0
-    ; GFX9: [[DEF6:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT5:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF6]], [[EXTRACT6]](<3 x s16>), 0
-    ; GFX9: [[INSERT6:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT5]], [[SHL1]](s16), 32
-    ; GFX9: [[EXTRACT7:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT6]](<4 x s16>), 0
-    ; GFX9: [[DEF7:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT7:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF7]], [[EXTRACT7]](<3 x s16>), 0
-    ; GFX9: [[EXTRACT8:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT7]](<4 x s16>), 0
-    ; GFX9: [[DEF8:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT8:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF8]], [[EXTRACT7]](<3 x s16>), 0
-    ; GFX9: [[EXTRACT9:%[0-9]+]]:_(s16) = G_EXTRACT [[INSERT8]](<4 x s16>), 32
-    ; GFX9: [[DEF9:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT9:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF9]], [[EXTRACT1]](<3 x s16>), 0
-    ; GFX9: [[EXTRACT10:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT9]](<4 x s16>), 0
-    ; GFX9: [[COPY2:%[0-9]+]]:_(s16) = COPY [[C]](s16)
-    ; GFX9: [[ASHR:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[EXTRACT8]], [[EXTRACT10]](<2 x s16>)
-    ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[EXTRACT9]], [[COPY2]](s16)
-    ; GFX9: [[DEF10:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[EXTRACT11:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF10]](<4 x s16>), 0
-    ; GFX9: [[DEF11:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT10:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF11]], [[EXTRACT11]](<3 x s16>), 0
-    ; GFX9: [[INSERT11:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT10]], [[ASHR]](<2 x s16>), 0
-    ; GFX9: [[EXTRACT12:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT11]](<4 x s16>), 0
-    ; GFX9: [[DEF12:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT12:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF12]], [[EXTRACT12]](<3 x s16>), 0
-    ; GFX9: [[INSERT13:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT12]], [[ASHR1]](s16), 32
-    ; GFX9: [[EXTRACT13:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT13]](<4 x s16>), 0
-    ; GFX9: [[INSERT14:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[COPY]], [[EXTRACT13]](<3 x s16>), 0
-    ; GFX9: $vgpr0_vgpr1 = COPY [[INSERT14]](<4 x s16>)
+    ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[EXTRACT]](<3 x s16>), 0
+    ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+    ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+    ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+    ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+    ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+    ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+    ; GFX9: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
+    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF1]](s16)
+    ; GFX9: [[DEF2:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+    ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16)
+    ; GFX9: [[SHL:%[0-9]+]]:_(<2 x s16>) = G_SHL [[BUILD_VECTOR]], [[BUILD_VECTOR2]](<2 x s16>)
+    ; GFX9: [[ASHR:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL]], [[BUILD_VECTOR2]](<2 x s16>)
+    ; GFX9: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16)
+    ; GFX9: [[SHL1:%[0-9]+]]:_(<2 x s16>) = G_SHL [[BUILD_VECTOR1]], [[BUILD_VECTOR3]](<2 x s16>)
+    ; GFX9: [[ASHR1:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL1]], [[BUILD_VECTOR3]](<2 x s16>)
+    ; GFX9: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16)
+    ; GFX9: [[SHL2:%[0-9]+]]:_(<2 x s16>) = G_SHL [[DEF2]], [[BUILD_VECTOR4]](<2 x s16>)
+    ; GFX9: [[ASHR2:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL2]], [[BUILD_VECTOR4]](<2 x s16>)
+    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[ASHR]](<2 x s16>), [[ASHR1]](<2 x s16>), [[ASHR2]](<2 x s16>)
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<6 x s16>), 0
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[COPY]], [[EXTRACT1]](<3 x s16>), 0
+    ; GFX9: $vgpr0_vgpr1 = COPY [[INSERT1]](<4 x s16>)
     ; GFX8-LABEL: name: test_sext_inreg_v3s16_1
     ; GFX8: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
     ; GFX8: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[COPY]](<4 x s16>), 0
-    ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
     ; GFX8: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; GFX8: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[EXTRACT]](<3 x s16>), 0
     ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
     ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
     ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
-    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C1]](s32)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
     ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
     ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
     ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
-    ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
-    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
-    ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C]](s16)
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[C]](s16)
-    ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[C]](s16)
-    ; GFX8: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[C]](s16)
+    ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+    ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16)
+    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[C1]](s16)
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C1]](s16)
+    ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[C1]](s16)
+    ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C1]](s16)
+    ; GFX8: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[C1]](s16)
     ; GFX8: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
     ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[ASHR]](s16), [[ASHR1]](s16)
     ; GFX8: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[ASHR2]](s16), [[DEF1]](s16)
@@ -960,33 +928,19 @@ body: |
     ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
     ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
     ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
-    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
-    ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[COPY1]](s32)
-    ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[COPY3]](s32)
-    ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
-    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C1]](s32)
-    ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
-    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[C]](s32)
-    ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[C]](s32)
-    ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[COPY6]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
-    ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
-    ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY9]], [[C]](s32)
-    ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL4]], [[C]](s32)
-    ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[ASHR2]], [[COPY8]](s32)
-    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR3]](s32)
-    ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY [[SHL2]](s32)
-    ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY11]], [[C]](s32)
-    ; GFX6: [[ASHR4:%[0-9]+]]:_(s32) = G_ASHR [[SHL5]], [[C]](s32)
-    ; GFX6: [[ASHR5:%[0-9]+]]:_(s32) = G_ASHR [[ASHR4]], [[COPY10]](s32)
-    ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR5]](s32)
+    ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C1]](s32)
+    ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
+    ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C1]](s32)
+    ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s32)
+    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
+    ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]](s32)
+    ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C1]](s32)
+    ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR2]](s32)
     ; GFX6: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
     ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
     ; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF1]](s16)
@@ -1009,37 +963,37 @@ body: |
 
     ; GFX9-LABEL: name: test_sext_inreg_v3s32_1
     ; GFX9: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; GFX9: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX9: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; GFX9: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
     ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32)
     ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
     ; GFX8-LABEL: name: test_sext_inreg_v3s32_1
     ; GFX8: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; GFX8: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX8: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; GFX8: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
     ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32)
     ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
     ; GFX6-LABEL: name: test_sext_inreg_v3s32_1
     ; GFX6: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
     ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32)
     ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -1056,43 +1010,43 @@ body: |
 
     ; GFX9-LABEL: name: test_sext_inreg_v4s32_1
     ; GFX9: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
-    ; GFX9: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[UV3]], [[C]](s32)
     ; GFX9: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX9: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; GFX9: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
+    ; GFX9: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[UV3]], [[C]](s32)
     ; GFX9: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[C]](s32)
     ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32), [[ASHR3]](s32)
     ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
     ; GFX8-LABEL: name: test_sext_inreg_v4s32_1
     ; GFX8: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
-    ; GFX8: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[UV3]], [[C]](s32)
     ; GFX8: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX8: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; GFX8: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
+    ; GFX8: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[UV3]], [[C]](s32)
     ; GFX8: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[C]](s32)
     ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32), [[ASHR3]](s32)
     ; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
     ; GFX6-LABEL: name: test_sext_inreg_v4s32_1
     ; GFX6: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[C]](s32)
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
-    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
-    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[UV3]], [[C]](s32)
     ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[C]](s32)
     ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[UV2]], [[C]](s32)
     ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
+    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[UV3]], [[C]](s32)
     ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[C]](s32)
     ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32), [[ASHR2]](s32), [[ASHR3]](s32)
     ; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -1109,39 +1063,37 @@ body: |
 
     ; GFX9-LABEL: name: test_sext_inreg_v4s16_1
     ; GFX9: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
-    ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
     ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
     ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
     ; GFX9: [[SHL:%[0-9]+]]:_(<2 x s16>) = G_SHL [[UV]], [[BUILD_VECTOR]](<2 x s16>)
+    ; GFX9: [[ASHR:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s16>)
+    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
     ; GFX9: [[SHL1:%[0-9]+]]:_(<2 x s16>) = G_SHL [[UV1]], [[BUILD_VECTOR1]](<2 x s16>)
-    ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-    ; GFX9: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-    ; GFX9: [[ASHR:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL]], [[BUILD_VECTOR2]](<2 x s16>)
-    ; GFX9: [[ASHR1:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL1]], [[BUILD_VECTOR3]](<2 x s16>)
+    ; GFX9: [[ASHR1:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL1]], [[BUILD_VECTOR1]](<2 x s16>)
     ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[ASHR]](<2 x s16>), [[ASHR1]](<2 x s16>)
     ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
     ; GFX8-LABEL: name: test_sext_inreg_v4s16_1
     ; GFX8: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
-    ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
     ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
     ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
     ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
-    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C1]](s32)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
     ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
     ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
     ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
-    ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
+    ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
     ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
-    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
-    ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C]](s16)
-    ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C]](s16)
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[C]](s16)
-    ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[C]](s16)
-    ; GFX8: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[C]](s16)
-    ; GFX8: [[ASHR3:%[0-9]+]]:_(s16) = G_ASHR [[SHL3]], [[C]](s16)
+    ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16)
+    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[C1]](s16)
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C1]](s16)
+    ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[C1]](s16)
+    ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C1]](s16)
+    ; GFX8: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[C1]](s16)
+    ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C1]](s16)
+    ; GFX8: [[ASHR3:%[0-9]+]]:_(s16) = G_ASHR [[SHL3]], [[C1]](s16)
     ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[ASHR]](s16), [[ASHR1]](s16)
     ; GFX8: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[ASHR2]](s16), [[ASHR3]](s16)
     ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
@@ -1154,42 +1106,23 @@ body: |
     ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
     ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
     ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
-    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
-    ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[COPY1]](s32)
-    ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[COPY3]](s32)
-    ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
-    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[COPY5]](s32)
-    ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
-    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[C1]](s32)
-    ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
-    ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY9]], [[C]](s32)
-    ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL4]], [[C]](s32)
-    ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[COPY8]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
-    ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
-    ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY11]], [[C]](s32)
-    ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL5]], [[C]](s32)
-    ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[ASHR2]], [[COPY10]](s32)
-    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR3]](s32)
-    ; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY [[SHL2]](s32)
-    ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[COPY13]], [[C]](s32)
-    ; GFX6: [[ASHR4:%[0-9]+]]:_(s32) = G_ASHR [[SHL6]], [[C]](s32)
-    ; GFX6: [[ASHR5:%[0-9]+]]:_(s32) = G_ASHR [[ASHR4]], [[COPY12]](s32)
-    ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR5]](s32)
-    ; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY [[SHL3]](s32)
-    ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[COPY15]], [[C]](s32)
-    ; GFX6: [[ASHR6:%[0-9]+]]:_(s32) = G_ASHR [[SHL7]], [[C]](s32)
-    ; GFX6: [[ASHR7:%[0-9]+]]:_(s32) = G_ASHR [[ASHR6]], [[COPY14]](s32)
-    ; GFX6: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR7]](s32)
+    ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C1]](s32)
+    ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
+    ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C1]](s32)
+    ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s32)
+    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
+    ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]](s32)
+    ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C1]](s32)
+    ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR2]](s32)
+    ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C1]](s32)
+    ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[C1]](s32)
+    ; GFX6: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR3]](s32)
     ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
     ; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
     ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
@@ -1207,52 +1140,49 @@ body: |
     ; GFX9-LABEL: name: test_sext_inreg_v6s16_1
     ; GFX9: [[DEF:%[0-9]+]]:_(<6 x s32>) = G_IMPLICIT_DEF
     ; GFX9: [[TRUNC:%[0-9]+]]:_(<6 x s16>) = G_TRUNC [[DEF]](<6 x s32>)
-    ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
     ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[TRUNC]](<6 x s16>)
+    ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
     ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-    ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
     ; GFX9: [[SHL:%[0-9]+]]:_(<2 x s16>) = G_SHL [[UV]], [[BUILD_VECTOR]](<2 x s16>)
+    ; GFX9: [[ASHR:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s16>)
+    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
     ; GFX9: [[SHL1:%[0-9]+]]:_(<2 x s16>) = G_SHL [[UV1]], [[BUILD_VECTOR1]](<2 x s16>)
+    ; GFX9: [[ASHR1:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL1]], [[BUILD_VECTOR1]](<2 x s16>)
+    ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
     ; GFX9: [[SHL2:%[0-9]+]]:_(<2 x s16>) = G_SHL [[UV2]], [[BUILD_VECTOR2]](<2 x s16>)
-    ; GFX9: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-    ; GFX9: [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-    ; GFX9: [[BUILD_VECTOR5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
-    ; GFX9: [[ASHR:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL]], [[BUILD_VECTOR3]](<2 x s16>)
-    ; GFX9: [[ASHR1:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL1]], [[BUILD_VECTOR4]](<2 x s16>)
-    ; GFX9: [[ASHR2:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL2]], [[BUILD_VECTOR5]](<2 x s16>)
+    ; GFX9: [[ASHR2:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[SHL2]], [[BUILD_VECTOR2]](<2 x s16>)
     ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[ASHR]](<2 x s16>), [[ASHR1]](<2 x s16>), [[ASHR2]](<2 x s16>)
     ; GFX9: S_ENDPGM 0, implicit [[CONCAT_VECTORS]](<6 x s16>)
     ; GFX8-LABEL: name: test_sext_inreg_v6s16_1
     ; GFX8: [[DEF:%[0-9]+]]:_(<6 x s32>) = G_IMPLICIT_DEF
     ; GFX8: [[TRUNC:%[0-9]+]]:_(<6 x s16>) = G_TRUNC [[DEF]](<6 x s32>)
-    ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
     ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[TRUNC]](<6 x s16>)
     ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
     ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
-    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C1]](s32)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
     ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
     ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
     ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
-    ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
+    ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
     ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
     ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
     ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
-    ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C1]](s32)
+    ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
     ; GFX8: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
-    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C]](s16)
-    ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C]](s16)
-    ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC4]], [[C]](s16)
-    ; GFX8: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[TRUNC5]], [[C]](s16)
-    ; GFX8: [[SHL5:%[0-9]+]]:_(s16) = G_SHL [[TRUNC6]], [[C]](s16)
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[C]](s16)
-    ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[C]](s16)
-    ; GFX8: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[C]](s16)
-    ; GFX8: [[ASHR3:%[0-9]+]]:_(s16) = G_ASHR [[SHL3]], [[C]](s16)
-    ; GFX8: [[ASHR4:%[0-9]+]]:_(s16) = G_ASHR [[SHL4]], [[C]](s16)
-    ; GFX8: [[ASHR5:%[0-9]+]]:_(s16) = G_ASHR [[SHL5]], [[C]](s16)
+    ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C1]](s16)
+    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[C1]](s16)
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C1]](s16)
+    ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[C1]](s16)
+    ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C1]](s16)
+    ; GFX8: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[C1]](s16)
+    ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC4]], [[C1]](s16)
+    ; GFX8: [[ASHR3:%[0-9]+]]:_(s16) = G_ASHR [[SHL3]], [[C1]](s16)
+    ; GFX8: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[TRUNC5]], [[C1]](s16)
+    ; GFX8: [[ASHR4:%[0-9]+]]:_(s16) = G_ASHR [[SHL4]], [[C1]](s16)
+    ; GFX8: [[SHL5:%[0-9]+]]:_(s16) = G_SHL [[TRUNC6]], [[C1]](s16)
+    ; GFX8: [[ASHR5:%[0-9]+]]:_(s16) = G_ASHR [[SHL5]], [[C1]](s16)
     ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[ASHR]](s16), [[ASHR1]](s16)
     ; GFX8: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[ASHR2]](s16), [[ASHR3]](s16)
     ; GFX8: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[ASHR4]](s16), [[ASHR5]](s16)
@@ -1269,60 +1199,31 @@ body: |
     ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
     ; GFX6: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
     ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
-    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
-    ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[COPY]](s32)
-    ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[COPY2]](s32)
-    ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
-    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[COPY4]](s32)
-    ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
-    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[COPY6]](s32)
-    ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
-    ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY9]], [[COPY8]](s32)
-    ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
-    ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY10]], [[C1]](s32)
-    ; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
-    ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[COPY12]], [[C]](s32)
-    ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL6]], [[C]](s32)
-    ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[COPY11]](s32)
-    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
-    ; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
-    ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[COPY14]], [[C]](s32)
-    ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL7]], [[C]](s32)
-    ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[ASHR2]], [[COPY13]](s32)
-    ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR3]](s32)
-    ; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY16:%[0-9]+]]:_(s32) = COPY [[SHL2]](s32)
-    ; GFX6: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[COPY16]], [[C]](s32)
-    ; GFX6: [[ASHR4:%[0-9]+]]:_(s32) = G_ASHR [[SHL8]], [[C]](s32)
-    ; GFX6: [[ASHR5:%[0-9]+]]:_(s32) = G_ASHR [[ASHR4]], [[COPY15]](s32)
-    ; GFX6: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR5]](s32)
-    ; GFX6: [[COPY17:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY18:%[0-9]+]]:_(s32) = COPY [[SHL3]](s32)
-    ; GFX6: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C]](s32)
-    ; GFX6: [[ASHR6:%[0-9]+]]:_(s32) = G_ASHR [[SHL9]], [[C]](s32)
-    ; GFX6: [[ASHR7:%[0-9]+]]:_(s32) = G_ASHR [[ASHR6]], [[COPY17]](s32)
-    ; GFX6: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR7]](s32)
-    ; GFX6: [[COPY19:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY20:%[0-9]+]]:_(s32) = COPY [[SHL4]](s32)
-    ; GFX6: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[COPY20]], [[C]](s32)
-    ; GFX6: [[ASHR8:%[0-9]+]]:_(s32) = G_ASHR [[SHL10]], [[C]](s32)
-    ; GFX6: [[ASHR9:%[0-9]+]]:_(s32) = G_ASHR [[ASHR8]], [[COPY19]](s32)
-    ; GFX6: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR9]](s32)
-    ; GFX6: [[COPY21:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; GFX6: [[COPY22:%[0-9]+]]:_(s32) = COPY [[SHL5]](s32)
-    ; GFX6: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[COPY22]], [[C]](s32)
-    ; GFX6: [[ASHR10:%[0-9]+]]:_(s32) = G_ASHR [[SHL11]], [[C]](s32)
-    ; GFX6: [[ASHR11:%[0-9]+]]:_(s32) = G_ASHR [[ASHR10]], [[COPY21]](s32)
-    ; GFX6: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR11]](s32)
+    ; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C1]](s32)
+    ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
+    ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C1]](s32)
+    ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s32)
+    ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
+    ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C1]](s32)
+    ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C1]](s32)
+    ; GFX6: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR2]](s32)
+    ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]](s32)
+    ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[C1]](s32)
+    ; GFX6: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR3]](s32)
+    ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+    ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C1]](s32)
+    ; GFX6: [[ASHR4:%[0-9]+]]:_(s32) = G_ASHR [[SHL4]], [[C1]](s32)
+    ; GFX6: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR4]](s32)
+    ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+    ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C1]](s32)
+    ; GFX6: [[ASHR5:%[0-9]+]]:_(s32) = G_ASHR [[SHL5]], [[C1]](s32)
+    ; GFX6: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR5]](s32)
     ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC1]](s16), [[TRUNC2]](s16)
     ; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
     ; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[TRUNC6]](s16)
@@ -1343,210 +1244,66 @@ body: |
 
     ; GFX9-LABEL: name: test_sext_inreg_v2s128_1
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](s64), [[C1]](s64)
     ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
-    ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
-    ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
-    ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
-    ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
-    ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
-    ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[TRUNC]](s32)
-    ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB1]](s32)
-    ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
-    ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
-    ; GFX9: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[SUB]](s32)
-    ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C1]]
-    ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
-    ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV3]], [[SELECT1]]
-    ; GFX9: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX9: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
-    ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC1]], [[C2]]
-    ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC1]]
-    ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC1]](s32), [[C2]]
-    ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC1]](s32), [[C3]]
-    ; GFX9: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[TRUNC1]](s32)
-    ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB3]](s32)
-    ; GFX9: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[TRUNC1]](s32)
-    ; GFX9: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL4]]
-    ; GFX9: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[SUB2]](s32)
-    ; GFX9: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL3]], [[C1]]
-    ; GFX9: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[SHL5]]
-    ; GFX9: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV5]], [[SELECT4]]
-    ; GFX9: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX9: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC2]], [[C2]]
-    ; GFX9: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC2]]
-    ; GFX9: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC2]](s32), [[C2]]
-    ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC2]](s32), [[C3]]
-    ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SELECT2]], [[TRUNC2]](s32)
-    ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[SELECT]], [[TRUNC2]](s32)
-    ; GFX9: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[SELECT2]], [[SUB5]](s32)
-    ; GFX9: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[SHL6]]
-    ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SELECT2]], [[C4]](s32)
-    ; GFX9: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SELECT2]], [[SUB4]](s32)
-    ; GFX9: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR2]], [[ASHR2]]
-    ; GFX9: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[SELECT]], [[SELECT6]]
-    ; GFX9: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[ASHR]], [[ASHR1]]
-    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT7]](s64), [[SELECT8]](s64)
-    ; GFX9: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX9: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[C2]]
-    ; GFX9: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC3]]
-    ; GFX9: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC3]](s32), [[C2]]
-    ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC3]](s32), [[C3]]
-    ; GFX9: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[SELECT5]], [[TRUNC3]](s32)
-    ; GFX9: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[SELECT3]], [[TRUNC3]](s32)
-    ; GFX9: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[SELECT5]], [[SUB7]](s32)
-    ; GFX9: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[SHL7]]
-    ; GFX9: [[ASHR4:%[0-9]+]]:_(s64) = G_ASHR [[SELECT5]], [[C4]](s32)
-    ; GFX9: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[SELECT5]], [[SUB6]](s32)
-    ; GFX9: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR3]], [[ASHR5]]
-    ; GFX9: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[SELECT3]], [[SELECT9]]
-    ; GFX9: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[ASHR3]], [[ASHR4]]
-    ; GFX9: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT10]](s64), [[SELECT11]](s64)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV1]](s128), [[MV2]](s128)
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[UV]](s128)
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+    ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[TRUNC]], [[C]](s32)
+    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[COPY1]](s32)
+    ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[ASHR]], [[COPY2]](s32)
+    ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[ASHR]](s64), [[ASHR1]](s64)
+    ; GFX9: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[UV1]](s128)
+    ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[TRUNC1]], [[COPY3]](s32)
+    ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX9: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[COPY4]](s32)
+    ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX9: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[ASHR2]], [[COPY5]](s32)
+    ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[ASHR2]](s64), [[ASHR3]](s64)
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
     ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
     ; GFX8-LABEL: name: test_sext_inreg_v2s128_1
     ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-    ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
-    ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX8: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](s64), [[C1]](s64)
     ; GFX8: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
-    ; GFX8: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
-    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
-    ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
-    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
-    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[TRUNC]](s32)
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB1]](s32)
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
-    ; GFX8: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
-    ; GFX8: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[SUB]](s32)
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C1]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV3]], [[SELECT1]]
-    ; GFX8: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX8: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
-    ; GFX8: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC1]], [[C2]]
-    ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC1]]
-    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC1]](s32), [[C2]]
-    ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC1]](s32), [[C3]]
-    ; GFX8: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[TRUNC1]](s32)
-    ; GFX8: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB3]](s32)
-    ; GFX8: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[TRUNC1]](s32)
-    ; GFX8: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL4]]
-    ; GFX8: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[SUB2]](s32)
-    ; GFX8: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL3]], [[C1]]
-    ; GFX8: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[SHL5]]
-    ; GFX8: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV5]], [[SELECT4]]
-    ; GFX8: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX8: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC2]], [[C2]]
-    ; GFX8: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC2]]
-    ; GFX8: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC2]](s32), [[C2]]
-    ; GFX8: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC2]](s32), [[C3]]
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SELECT2]], [[TRUNC2]](s32)
-    ; GFX8: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[SELECT]], [[TRUNC2]](s32)
-    ; GFX8: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[SELECT2]], [[SUB5]](s32)
-    ; GFX8: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[SHL6]]
-    ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX8: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SELECT2]], [[C4]](s32)
-    ; GFX8: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SELECT2]], [[SUB4]](s32)
-    ; GFX8: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR2]], [[ASHR2]]
-    ; GFX8: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[SELECT]], [[SELECT6]]
-    ; GFX8: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[ASHR]], [[ASHR1]]
-    ; GFX8: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT7]](s64), [[SELECT8]](s64)
-    ; GFX8: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX8: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[C2]]
-    ; GFX8: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC3]]
-    ; GFX8: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC3]](s32), [[C2]]
-    ; GFX8: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC3]](s32), [[C3]]
-    ; GFX8: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[SELECT5]], [[TRUNC3]](s32)
-    ; GFX8: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[SELECT3]], [[TRUNC3]](s32)
-    ; GFX8: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[SELECT5]], [[SUB7]](s32)
-    ; GFX8: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[SHL7]]
-    ; GFX8: [[ASHR4:%[0-9]+]]:_(s64) = G_ASHR [[SELECT5]], [[C4]](s32)
-    ; GFX8: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[SELECT5]], [[SUB6]](s32)
-    ; GFX8: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR3]], [[ASHR5]]
-    ; GFX8: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[SELECT3]], [[SELECT9]]
-    ; GFX8: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[ASHR3]], [[ASHR4]]
-    ; GFX8: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT10]](s64), [[SELECT11]](s64)
-    ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV1]](s128), [[MV2]](s128)
+    ; GFX8: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[UV]](s128)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[TRUNC]], [[C]](s32)
+    ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[COPY1]](s32)
+    ; GFX8: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX8: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[ASHR]], [[COPY2]](s32)
+    ; GFX8: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[ASHR]](s64), [[ASHR1]](s64)
+    ; GFX8: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[UV1]](s128)
+    ; GFX8: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[TRUNC1]], [[COPY3]](s32)
+    ; GFX8: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX8: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[COPY4]](s32)
+    ; GFX8: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX8: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[ASHR2]], [[COPY5]](s32)
+    ; GFX8: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[ASHR2]](s64), [[ASHR3]](s64)
+    ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
     ; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
     ; GFX6-LABEL: name: test_sext_inreg_v2s128_1
     ; GFX6: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-    ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
-    ; GFX6: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX6: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](s64), [[C1]](s64)
     ; GFX6: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
-    ; GFX6: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
-    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
-    ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
-    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
-    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[TRUNC]](s32)
-    ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB1]](s32)
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
-    ; GFX6: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
-    ; GFX6: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[SUB]](s32)
-    ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C1]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV3]], [[SELECT1]]
-    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX6: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
-    ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC1]], [[C2]]
-    ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC1]]
-    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC1]](s32), [[C2]]
-    ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC1]](s32), [[C3]]
-    ; GFX6: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[TRUNC1]](s32)
-    ; GFX6: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB3]](s32)
-    ; GFX6: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[TRUNC1]](s32)
-    ; GFX6: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL4]]
-    ; GFX6: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[SUB2]](s32)
-    ; GFX6: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL3]], [[C1]]
-    ; GFX6: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[SHL5]]
-    ; GFX6: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV5]], [[SELECT4]]
-    ; GFX6: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX6: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC2]], [[C2]]
-    ; GFX6: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC2]]
-    ; GFX6: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC2]](s32), [[C2]]
-    ; GFX6: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC2]](s32), [[C3]]
-    ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SELECT2]], [[TRUNC2]](s32)
-    ; GFX6: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[SELECT]], [[TRUNC2]](s32)
-    ; GFX6: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[SELECT2]], [[SUB5]](s32)
-    ; GFX6: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[SHL6]]
-    ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX6: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SELECT2]], [[C4]](s32)
-    ; GFX6: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SELECT2]], [[SUB4]](s32)
-    ; GFX6: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR2]], [[ASHR2]]
-    ; GFX6: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[SELECT]], [[SELECT6]]
-    ; GFX6: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[ASHR]], [[ASHR1]]
-    ; GFX6: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT7]](s64), [[SELECT8]](s64)
-    ; GFX6: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
-    ; GFX6: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[C2]]
-    ; GFX6: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC3]]
-    ; GFX6: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC3]](s32), [[C2]]
-    ; GFX6: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC3]](s32), [[C3]]
-    ; GFX6: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[SELECT5]], [[TRUNC3]](s32)
-    ; GFX6: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[SELECT3]], [[TRUNC3]](s32)
-    ; GFX6: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[SELECT5]], [[SUB7]](s32)
-    ; GFX6: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[SHL7]]
-    ; GFX6: [[ASHR4:%[0-9]+]]:_(s64) = G_ASHR [[SELECT5]], [[C4]](s32)
-    ; GFX6: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[SELECT5]], [[SUB6]](s32)
-    ; GFX6: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR3]], [[ASHR5]]
-    ; GFX6: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[SELECT3]], [[SELECT9]]
-    ; GFX6: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[ASHR3]], [[ASHR4]]
-    ; GFX6: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT10]](s64), [[SELECT11]](s64)
-    ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV1]](s128), [[MV2]](s128)
+    ; GFX6: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[UV]](s128)
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[TRUNC]], [[C]](s32)
+    ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[COPY1]](s32)
+    ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX6: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[ASHR]], [[COPY2]](s32)
+    ; GFX6: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[ASHR]](s64), [[ASHR1]](s64)
+    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[UV1]](s128)
+    ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[TRUNC1]], [[COPY3]](s32)
+    ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX6: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[COPY4]](s32)
+    ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; GFX6: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[ASHR2]], [[COPY5]](s32)
+    ; GFX6: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[ASHR2]](s64), [[ASHR3]](s64)
+    ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
     ; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
     %0:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
     %1:_(<2 x s128>) = G_SEXT_INREG %0, 1
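
For readers tracing the new checks: the lowered G_SEXT_INREG sequence
above is the classic shift-pair sign extension,
sext_inreg(x, B) == ashr(shl(x, W - B), W - B) for a W-bit value.
A minimal scalar sketch in C++ (illustration only, not part of the
patch; sextInReg64 is a hypothetical stand-in for the G_SHL/G_ASHR
pair, with Bits = 1 giving the shift amount 63 seen in the v2s128
checks):

  #include <cstdint>

  // Model of the shl/ashr lowering on one 64-bit piece, assuming the
  // usual arithmetic right shift on signed values.
  int64_t sextInReg64(int64_t X, unsigned Bits) {
    unsigned Shift = 64 - Bits;               // Bits = 1 -> Shift = 63
    // Shift left as unsigned to sidestep signed-overflow UB, then
    // arithmetic-shift right to replicate bit (Bits - 1) upward.
    return (int64_t)((uint64_t)X << Shift) >> Shift;
  }

  // e.g. sextInReg64(1, 1) == -1, matching G_SEXT_INREG %0, 1.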