[llvm] 8aa81aa - AMDGPU/GlobalISel: Fixed handling of non-standard vectors

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Wed May 27 15:44:19 PDT 2020


Author: Stanislav Mekhanoshin
Date: 2020-05-27T15:44:09-07:00
New Revision: 8aa81aaebe533d0721f1c00deeb0fc452b0147a5

URL: https://github.com/llvm/llvm-project/commit/8aa81aaebe533d0721f1c00deeb0fc452b0147a5
DIFF: https://github.com/llvm/llvm-project/commit/8aa81aaebe533d0721f1c00deeb0fc452b0147a5.diff

LOG: AMDGPU/GlobalISel: Fixed handling of non-standard vectors

We do not have register classes for all possible vector
sizes, so round the size up for extract vector element.

This also fixes selection of G_MERGE_VALUES when vector
sizes are not a power of two.

This required refactoring getRegSplitParts() so that it
can handle vectors that are not a power of two in size.

Ideally we would like RegSplitParts to be generated by
tablegen.

Differential Revision: https://reviews.llvm.org/D80457
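
For illustration, the rounding rule can be pictured with a minimal
standalone sketch; pickVGPRClassBits() is a hypothetical helper used
only for this example, and the 1-bit and 16-bit special cases of the
real getVGPRClassForBitWidth() below are omitted:

  // Round a bit width up to the smallest available VGPR tuple size.
  #include <cstdio>

  static unsigned pickVGPRClassBits(unsigned BitWidth) {
    static const unsigned Sizes[] = {32,  64,  96,  128, 160,
                                     192, 256, 512, 1024};
    for (unsigned S : Sizes)
      if (BitWidth <= S)
        return S;   // round up to the smallest class that fits
    return 0;       // nothing can hold more than 1024 bits
  }

  int main() {
    // <7 x s32> = 224 bits previously hit the switch default (nullptr);
    // with this patch it rounds up to the 256-bit class.
    std::printf("224 -> VReg_%u\n", pickVGPRClassBits(224)); // VReg_256
    std::printf("384 -> VReg_%u\n", pickVGPRClassBits(384)); // VReg_512
    return 0;
  }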

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
    llvm/lib/Target/AMDGPU/SIRegisterInfo.h
    llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 7657a2d0ea2c..f2c4fa2d60e7 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -26,6 +26,7 @@
 #include "llvm/CodeGen/SlotIndexes.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/LLVMContext.h"
+#include <vector>
 
 using namespace llvm;
 
@@ -38,6 +39,8 @@ static cl::opt<bool> EnableSpillSGPRToVGPR(
   cl::ReallyHidden,
   cl::init(true));
 
+std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
+
 SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
     : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
       SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
@@ -53,6 +56,30 @@ SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
   RegPressureIgnoredUnits.set(*MCRegUnitIterator(AMDGPU::M0, this));
   for (auto Reg : AMDGPU::VGPR_HI16RegClass)
     RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
+
+  // HACK: Until this is fully tablegen'd.
+  static llvm::once_flag InitializeRegSplitPartsFlag;
+
+  static auto InitializeRegSplitPartsOnce = [this]() {
+    for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
+      unsigned Size = getSubRegIdxSize(Idx);
+      if (Size & 31)
+        continue;
+      std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
+      unsigned Pos = getSubRegIdxOffset(Idx);
+      if (Pos % Size)
+        continue;
+      Pos /= Size;
+      if (Vec.empty()) {
+        unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
+        Vec.resize(MaxNumParts);
+      }
+      Vec[Pos] = Idx;
+    }
+  };
+
+
+  llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
 }
 
 void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
@@ -1313,88 +1340,82 @@ StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
 
 const TargetRegisterClass *
 SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) {
-  switch (BitWidth) {
-  case 1:
+  if (BitWidth == 1)
     return &AMDGPU::VReg_1RegClass;
-  case 16:
+  if (BitWidth <= 16)
     return &AMDGPU::VGPR_LO16RegClass;
-  case 32:
+  if (BitWidth <= 32)
     return &AMDGPU::VGPR_32RegClass;
-  case 64:
+  if (BitWidth <= 64)
     return &AMDGPU::VReg_64RegClass;
-  case 96:
+  if (BitWidth <= 96)
     return &AMDGPU::VReg_96RegClass;
-  case 128:
+  if (BitWidth <= 128)
     return &AMDGPU::VReg_128RegClass;
-  case 160:
+  if (BitWidth <= 160)
     return &AMDGPU::VReg_160RegClass;
-  case 192:
+  if (BitWidth <= 192)
     return &AMDGPU::VReg_192RegClass;
-  case 256:
+  if (BitWidth <= 256)
     return &AMDGPU::VReg_256RegClass;
-  case 512:
+  if (BitWidth <= 512)
     return &AMDGPU::VReg_512RegClass;
-  case 1024:
+  if (BitWidth <= 1024)
     return &AMDGPU::VReg_1024RegClass;
-  default:
-    return nullptr;
-  }
+
+  return nullptr;
 }
 
 const TargetRegisterClass *
 SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) {
-  switch (BitWidth) {
-  case 16:
+  if (BitWidth <= 16)
     return &AMDGPU::AGPR_LO16RegClass;
-  case 32:
+  if (BitWidth <= 32)
     return &AMDGPU::AGPR_32RegClass;
-  case 64:
+  if (BitWidth <= 64)
     return &AMDGPU::AReg_64RegClass;
-  case 96:
+  if (BitWidth <= 96)
     return &AMDGPU::AReg_96RegClass;
-  case 128:
+  if (BitWidth <= 128)
     return &AMDGPU::AReg_128RegClass;
-  case 160:
+  if (BitWidth <= 160)
     return &AMDGPU::AReg_160RegClass;
-  case 192:
+  if (BitWidth <= 192)
     return &AMDGPU::AReg_192RegClass;
-  case 256:
+  if (BitWidth <= 256)
     return &AMDGPU::AReg_256RegClass;
-  case 512:
+  if (BitWidth <= 512)
     return &AMDGPU::AReg_512RegClass;
-  case 1024:
+  if (BitWidth <= 1024)
     return &AMDGPU::AReg_1024RegClass;
-  default:
-    return nullptr;
-  }
+
+  return nullptr;
 }
 
 const TargetRegisterClass *
 SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
-  switch (BitWidth) {
-  case 16:
+  if (BitWidth <= 16)
     return &AMDGPU::SGPR_LO16RegClass;
-  case 32:
+  if (BitWidth <= 32)
     return &AMDGPU::SReg_32RegClass;
-  case 64:
+  if (BitWidth <= 64)
     return &AMDGPU::SReg_64RegClass;
-  case 96:
+  if (BitWidth <= 96)
     return &AMDGPU::SGPR_96RegClass;
-  case 128:
+  if (BitWidth <= 128)
     return &AMDGPU::SGPR_128RegClass;
-  case 160:
+  if (BitWidth <= 160)
     return &AMDGPU::SGPR_160RegClass;
-  case 192:
+  if (BitWidth <= 192)
     return &AMDGPU::SGPR_192RegClass;
-  case 256:
+  if (BitWidth <= 256)
     return &AMDGPU::SGPR_256RegClass;
-  case 512:
+  if (BitWidth <= 512)
     return &AMDGPU::SGPR_512RegClass;
-  case 1024:
+  if (BitWidth <= 1024)
     return &AMDGPU::SGPR_1024RegClass;
-  default:
-    return nullptr;
-  }
+
+  return nullptr;
 }
 
 // FIXME: This is very slow. It might be worth creating a map from physreg to
@@ -1579,65 +1600,14 @@ ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC
   const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
   assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
 
-  const unsigned EltBitWidth = EltSize * 8;
-  assert(EltBitWidth >= 32 && EltBitWidth < 1024 && isPowerOf2_32(EltBitWidth));
-  const unsigned LogEltBitWidth = Log2_32(EltBitWidth);
-
-  assert(RegBitWidth % EltBitWidth == 0);
+  const unsigned RegDWORDs = RegBitWidth / 32;
+  const unsigned EltDWORDs = EltSize / 4;
+  assert(RegSplitParts.size() + 1 >= EltDWORDs);
 
-  if (RegBitWidth == EltBitWidth)
-    return {};
-
-  static const int16_t Sub_32[] = {
-    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
-    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
-    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
-    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
-    AMDGPU::sub16, AMDGPU::sub17, AMDGPU::sub18, AMDGPU::sub19,
-    AMDGPU::sub20, AMDGPU::sub21, AMDGPU::sub22, AMDGPU::sub23,
-    AMDGPU::sub24, AMDGPU::sub25, AMDGPU::sub26, AMDGPU::sub27,
-    AMDGPU::sub28, AMDGPU::sub29, AMDGPU::sub30, AMDGPU::sub31
-  };
-
-  static const int16_t Sub_64[] = {
-    AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
-    AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
-    AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
-    AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
-    AMDGPU::sub16_sub17, AMDGPU::sub18_sub19,
-    AMDGPU::sub20_sub21, AMDGPU::sub22_sub23,
-    AMDGPU::sub24_sub25, AMDGPU::sub26_sub27,
-    AMDGPU::sub28_sub29, AMDGPU::sub30_sub31
-  };
-
-  static const int16_t Sub_128[] = {
-    AMDGPU::sub0_sub1_sub2_sub3,
-    AMDGPU::sub4_sub5_sub6_sub7,
-    AMDGPU::sub8_sub9_sub10_sub11,
-    AMDGPU::sub12_sub13_sub14_sub15,
-    AMDGPU::sub16_sub17_sub18_sub19,
-    AMDGPU::sub20_sub21_sub22_sub23,
-    AMDGPU::sub24_sub25_sub26_sub27,
-    AMDGPU::sub28_sub29_sub30_sub31
-  };
-
-  static const int16_t Sub_256[] = {
-    AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7,
-    AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15,
-    AMDGPU::sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23,
-    AMDGPU::sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
-  };
-
-  static const int16_t Sub_512[] = {
-    AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15,
-    AMDGPU::sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
-  };
-
-  static const int16_t *const Subs[] = {
-    Sub_32, Sub_64, Sub_128, Sub_256, Sub_512
-  };
+  const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
+  const unsigned NumParts = RegDWORDs / EltDWORDs;
 
-  return makeArrayRef(Subs[LogEltBitWidth - 5], RegBitWidth >> LogEltBitWidth);
+  return makeArrayRef(Parts.data(), NumParts);
 }
 
 const TargetRegisterClass*
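
As a worked example of the new table-driven split above, here is a
standalone sketch assuming a pre-built RegSplitParts-style table; the
toy table stores strings instead of the real int16_t subreg indices so
it can print something readable:

  #include <array>
  #include <cstdio>
  #include <string>
  #include <vector>

  int main() {
    // RegSplitParts[N-1] lists, in offset order, the subregs of N DWORDs.
    std::array<std::vector<std::string>, 16> RegSplitParts;
    RegSplitParts[0] = {"sub0", "sub1", "sub2", "sub3", "sub4", "sub5"};
    RegSplitParts[1] = {"sub0_sub1", "sub2_sub3", "sub4_sub5"};

    const unsigned RegBitWidth = 192; // e.g. a <3 x s64> register
    const unsigned EltSize = 8;       // split into 64-bit (8-byte) parts
    const unsigned RegDWORDs = RegBitWidth / 32;     // 6
    const unsigned EltDWORDs = EltSize / 4;          // 2
    const unsigned NumParts = RegDWORDs / EltDWORDs; // 3
    for (unsigned I = 0; I != NumParts; ++I)
      std::printf("%s\n", RegSplitParts[EltDWORDs - 1][I].c_str());
    // Prints sub0_sub1, sub2_sub3, sub4_sub5 -- the first three entries
    // of the old hard-coded Sub_64 array.
    return 0;
  }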

diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
index 0b2920b3777e..8a8ac8169453 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -33,6 +33,13 @@ class SIRegisterInfo final : public AMDGPUGenRegisterInfo {
   bool isWave32;
   BitVector RegPressureIgnoredUnits;
 
+  /// Sub reg indexes for getRegSplitParts.
+  /// First index represents subreg size from 1 to 16 DWORDs.
+  /// The inner vector is sorted by bit offset.
+  /// Provided a register can be fully split with given subregs,
+  /// all elements of the inner vector combined give a full lane mask.
+  static std::array<std::vector<int16_t>, 16> RegSplitParts;
+
   void reserveRegisterTuples(BitVector &, MCRegister Reg) const;
 
 public:
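
The mapping from a subreg index to a slot in this table follows the
constructor loop added above; a toy sketch of the same logic, where
SubRegInfo stands in for getSubRegIdxSize()/getSubRegIdxOffset() and
the numeric indices are made up:

  #include <array>
  #include <cstdint>
  #include <vector>

  struct SubRegInfo { int16_t Idx; unsigned SizeBits; unsigned OffsetBits; };

  int main() {
    std::array<std::vector<int16_t>, 16> Table;
    const SubRegInfo Subs[] = {
        {1, 32, 0},  {2, 32, 32},  // like sub0, sub1
        {10, 64, 0}, {11, 64, 64}, // like sub0_sub1, sub2_sub3
        {20, 96, 32},              // like sub1_sub2_sub3: skipped below,
    };                             // its offset is not a multiple of its size
    for (const SubRegInfo &S : Subs) {
      if (S.SizeBits & 31)
        continue;                      // only whole-DWORD sizes participate
      unsigned Pos = S.OffsetBits;
      if (Pos % S.SizeBits)
        continue;                      // must start on a size-aligned boundary
      Pos /= S.SizeBits;
      std::vector<int16_t> &Vec = Table[S.SizeBits / 32 - 1];
      if (Vec.empty())
        Vec.resize(1024 / S.SizeBits); // the largest register is 1024 bits
      Vec[Pos] = S.Idx;
    }
    // Table[0] begins {1, 2, ...} and Table[1] begins {10, 11, ...}; the
    // misaligned 96-bit entry is dropped, keeping each inner vector
    // sorted by offset as documented.
    return 0;
  }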

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
index 5a7b4b390b5d..443944408f33 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
@@ -1517,3 +1517,639 @@ entry:
   %ext = extractelement <16 x double> <double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0, double 8.0, double 9.0, double 10.0, double 11.0, double 12.0, double 13.0, double 14.0, double 15.0, double 16.0>, i32 %sel
   ret double %ext
 }
+
+define amdgpu_ps float @dyn_extract_v6f32_s_v(<6 x float> inreg %vec, i32 %sel) {
+; GPRIDX-LABEL: dyn_extract_v6f32_s_v:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_mov_b32 s0, s2
+; GPRIDX-NEXT:    s_mov_b32 s1, s3
+; GPRIDX-NEXT:    s_mov_b32 s2, s4
+; GPRIDX-NEXT:    s_mov_b32 s3, s5
+; GPRIDX-NEXT:    s_mov_b32 s4, s6
+; GPRIDX-NEXT:    s_mov_b32 s5, s7
+; GPRIDX-NEXT:    s_mov_b64 s[6:7], exec
+; GPRIDX-NEXT:  BB33_1: ; =>This Inner Loop Header: Depth=1
+; GPRIDX-NEXT:    v_readfirstlane_b32 s8, v0
+; GPRIDX-NEXT:    s_mov_b32 m0, s8
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, s8, v0
+; GPRIDX-NEXT:    s_movrels_b32 s8, s0
+; GPRIDX-NEXT:    v_mov_b32_e32 v1, s8
+; GPRIDX-NEXT:    s_and_saveexec_b64 vcc, vcc
+; GPRIDX-NEXT:    s_xor_b64 exec, exec, vcc
+; GPRIDX-NEXT:    s_cbranch_execnz BB33_1
+; GPRIDX-NEXT:  ; %bb.2:
+; GPRIDX-NEXT:    s_mov_b64 exec, s[6:7]
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v1
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v6f32_s_v:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 s0, s2
+; MOVREL-NEXT:    s_mov_b32 s1, s3
+; MOVREL-NEXT:    s_mov_b32 s2, s4
+; MOVREL-NEXT:    s_mov_b32 s3, s5
+; MOVREL-NEXT:    s_mov_b32 s4, s6
+; MOVREL-NEXT:    s_mov_b32 s5, s7
+; MOVREL-NEXT:    s_mov_b64 s[6:7], exec
+; MOVREL-NEXT:  BB33_1: ; =>This Inner Loop Header: Depth=1
+; MOVREL-NEXT:    v_readfirstlane_b32 s8, v0
+; MOVREL-NEXT:    s_mov_b32 m0, s8
+; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, s8, v0
+; MOVREL-NEXT:    s_movrels_b32 s8, s0
+; MOVREL-NEXT:    v_mov_b32_e32 v1, s8
+; MOVREL-NEXT:    s_and_saveexec_b64 vcc, vcc
+; MOVREL-NEXT:    s_xor_b64 exec, exec, vcc
+; MOVREL-NEXT:    s_cbranch_execnz BB33_1
+; MOVREL-NEXT:  ; %bb.2:
+; MOVREL-NEXT:    s_mov_b64 exec, s[6:7]
+; MOVREL-NEXT:    v_mov_b32_e32 v0, v1
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <6 x float> %vec, i32 %sel
+  ret float %ext
+}
+
+define float @dyn_extract_v6f32_v_v(<6 x float> %vec, i32 %sel) {
+; GPRIDX-LABEL: dyn_extract_v6f32_v_v:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GPRIDX-NEXT:    s_mov_b64 s[4:5], exec
+; GPRIDX-NEXT:  BB34_1: ; =>This Inner Loop Header: Depth=1
+; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v6
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v6
+; GPRIDX-NEXT:    s_set_gpr_idx_on s6, gpr_idx(SRC0)
+; GPRIDX-NEXT:    v_mov_b32_e32 v7, v0
+; GPRIDX-NEXT:    s_set_gpr_idx_off
+; GPRIDX-NEXT:    s_and_saveexec_b64 vcc, vcc
+; GPRIDX-NEXT:    s_xor_b64 exec, exec, vcc
+; GPRIDX-NEXT:    s_cbranch_execnz BB34_1
+; GPRIDX-NEXT:  ; %bb.2:
+; GPRIDX-NEXT:    s_mov_b64 exec, s[4:5]
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v7
+; GPRIDX-NEXT:    s_setpc_b64 s[30:31]
+;
+; MOVREL-LABEL: dyn_extract_v6f32_v_v:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; MOVREL-NEXT:    s_mov_b64 s[4:5], exec
+; MOVREL-NEXT:  BB34_1: ; =>This Inner Loop Header: Depth=1
+; MOVREL-NEXT:    v_readfirstlane_b32 s6, v6
+; MOVREL-NEXT:    s_mov_b32 m0, s6
+; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v6
+; MOVREL-NEXT:    v_movrels_b32_e32 v7, v0
+; MOVREL-NEXT:    s_and_saveexec_b64 vcc, vcc
+; MOVREL-NEXT:    s_xor_b64 exec, exec, vcc
+; MOVREL-NEXT:    s_cbranch_execnz BB34_1
+; MOVREL-NEXT:  ; %bb.2:
+; MOVREL-NEXT:    s_mov_b64 exec, s[4:5]
+; MOVREL-NEXT:    v_mov_b32_e32 v0, v7
+; MOVREL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %ext = extractelement <6 x float> %vec, i32 %sel
+  ret float %ext
+}
+
+define amdgpu_ps float @dyn_extract_v6f32_v_s(<6 x float> %vec, i32 inreg %sel) {
+; GPRIDX-LABEL: dyn_extract_v6f32_v_s:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_set_gpr_idx_on s2, gpr_idx(SRC0)
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v0
+; GPRIDX-NEXT:    s_set_gpr_idx_off
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v6f32_v_s:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 m0, s2
+; MOVREL-NEXT:    v_movrels_b32_e32 v0, v0
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <6 x float> %vec, i32 %sel
+  ret float %ext
+}
+
+define amdgpu_ps float @dyn_extract_v6f32_s_s(<6 x float> inreg %vec, i32 inreg %sel) {
+; GPRIDX-LABEL: dyn_extract_v6f32_s_s:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_mov_b32 s0, s2
+; GPRIDX-NEXT:    s_mov_b32 m0, s8
+; GPRIDX-NEXT:    s_mov_b32 s1, s3
+; GPRIDX-NEXT:    s_mov_b32 s2, s4
+; GPRIDX-NEXT:    s_mov_b32 s3, s5
+; GPRIDX-NEXT:    s_mov_b32 s4, s6
+; GPRIDX-NEXT:    s_mov_b32 s5, s7
+; GPRIDX-NEXT:    s_movrels_b32 s0, s0
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, s0
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v6f32_s_s:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 s0, s2
+; MOVREL-NEXT:    s_mov_b32 m0, s8
+; MOVREL-NEXT:    s_mov_b32 s1, s3
+; MOVREL-NEXT:    s_mov_b32 s2, s4
+; MOVREL-NEXT:    s_mov_b32 s3, s5
+; MOVREL-NEXT:    s_mov_b32 s4, s6
+; MOVREL-NEXT:    s_mov_b32 s5, s7
+; MOVREL-NEXT:    s_movrels_b32 s0, s0
+; MOVREL-NEXT:    v_mov_b32_e32 v0, s0
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <6 x float> %vec, i32 %sel
+  ret float %ext
+}
+
+define amdgpu_ps float @dyn_extract_v7f32_s_v(<7 x float> inreg %vec, i32 %sel) {
+; GPRIDX-LABEL: dyn_extract_v7f32_s_v:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_mov_b32 s0, s2
+; GPRIDX-NEXT:    s_mov_b32 s1, s3
+; GPRIDX-NEXT:    s_mov_b32 s2, s4
+; GPRIDX-NEXT:    s_mov_b32 s3, s5
+; GPRIDX-NEXT:    s_mov_b32 s4, s6
+; GPRIDX-NEXT:    s_mov_b32 s6, s8
+; GPRIDX-NEXT:    s_mov_b32 s5, s7
+; GPRIDX-NEXT:    s_mov_b64 s[8:9], exec
+; GPRIDX-NEXT:  BB37_1: ; =>This Inner Loop Header: Depth=1
+; GPRIDX-NEXT:    v_readfirstlane_b32 s7, v0
+; GPRIDX-NEXT:    s_mov_b32 m0, s7
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, s7, v0
+; GPRIDX-NEXT:    s_movrels_b32 s7, s0
+; GPRIDX-NEXT:    v_mov_b32_e32 v1, s7
+; GPRIDX-NEXT:    s_and_saveexec_b64 vcc, vcc
+; GPRIDX-NEXT:    s_xor_b64 exec, exec, vcc
+; GPRIDX-NEXT:    s_cbranch_execnz BB37_1
+; GPRIDX-NEXT:  ; %bb.2:
+; GPRIDX-NEXT:    s_mov_b64 exec, s[8:9]
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v1
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v7f32_s_v:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 s0, s2
+; MOVREL-NEXT:    s_mov_b32 s1, s3
+; MOVREL-NEXT:    s_mov_b32 s2, s4
+; MOVREL-NEXT:    s_mov_b32 s3, s5
+; MOVREL-NEXT:    s_mov_b32 s4, s6
+; MOVREL-NEXT:    s_mov_b32 s6, s8
+; MOVREL-NEXT:    s_mov_b32 s5, s7
+; MOVREL-NEXT:    s_mov_b64 s[8:9], exec
+; MOVREL-NEXT:  BB37_1: ; =>This Inner Loop Header: Depth=1
+; MOVREL-NEXT:    v_readfirstlane_b32 s7, v0
+; MOVREL-NEXT:    s_mov_b32 m0, s7
+; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, s7, v0
+; MOVREL-NEXT:    s_movrels_b32 s7, s0
+; MOVREL-NEXT:    v_mov_b32_e32 v1, s7
+; MOVREL-NEXT:    s_and_saveexec_b64 vcc, vcc
+; MOVREL-NEXT:    s_xor_b64 exec, exec, vcc
+; MOVREL-NEXT:    s_cbranch_execnz BB37_1
+; MOVREL-NEXT:  ; %bb.2:
+; MOVREL-NEXT:    s_mov_b64 exec, s[8:9]
+; MOVREL-NEXT:    v_mov_b32_e32 v0, v1
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <7 x float> %vec, i32 %sel
+  ret float %ext
+}
+
+define float @dyn_extract_v7f32_v_v(<7 x float> %vec, i32 %sel) {
+; GPRIDX-LABEL: dyn_extract_v7f32_v_v:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GPRIDX-NEXT:    s_mov_b64 s[4:5], exec
+; GPRIDX-NEXT:  BB38_1: ; =>This Inner Loop Header: Depth=1
+; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v7
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v7
+; GPRIDX-NEXT:    s_set_gpr_idx_on s6, gpr_idx(SRC0)
+; GPRIDX-NEXT:    v_mov_b32_e32 v8, v0
+; GPRIDX-NEXT:    s_set_gpr_idx_off
+; GPRIDX-NEXT:    s_and_saveexec_b64 vcc, vcc
+; GPRIDX-NEXT:    s_xor_b64 exec, exec, vcc
+; GPRIDX-NEXT:    s_cbranch_execnz BB38_1
+; GPRIDX-NEXT:  ; %bb.2:
+; GPRIDX-NEXT:    s_mov_b64 exec, s[4:5]
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v8
+; GPRIDX-NEXT:    s_setpc_b64 s[30:31]
+;
+; MOVREL-LABEL: dyn_extract_v7f32_v_v:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; MOVREL-NEXT:    s_mov_b64 s[4:5], exec
+; MOVREL-NEXT:  BB38_1: ; =>This Inner Loop Header: Depth=1
+; MOVREL-NEXT:    v_readfirstlane_b32 s6, v7
+; MOVREL-NEXT:    s_mov_b32 m0, s6
+; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v7
+; MOVREL-NEXT:    v_movrels_b32_e32 v8, v0
+; MOVREL-NEXT:    s_and_saveexec_b64 vcc, vcc
+; MOVREL-NEXT:    s_xor_b64 exec, exec, vcc
+; MOVREL-NEXT:    s_cbranch_execnz BB38_1
+; MOVREL-NEXT:  ; %bb.2:
+; MOVREL-NEXT:    s_mov_b64 exec, s[4:5]
+; MOVREL-NEXT:    v_mov_b32_e32 v0, v8
+; MOVREL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %ext = extractelement <7 x float> %vec, i32 %sel
+  ret float %ext
+}
+
+define amdgpu_ps float @dyn_extract_v7f32_v_s(<7 x float> %vec, i32 inreg %sel) {
+; GPRIDX-LABEL: dyn_extract_v7f32_v_s:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_set_gpr_idx_on s2, gpr_idx(SRC0)
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v0
+; GPRIDX-NEXT:    s_set_gpr_idx_off
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v7f32_v_s:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 m0, s2
+; MOVREL-NEXT:    v_movrels_b32_e32 v0, v0
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <7 x float> %vec, i32 %sel
+  ret float %ext
+}
+
+define amdgpu_ps float @dyn_extract_v7f32_s_s(<7 x float> inreg %vec, i32 inreg %sel) {
+; GPRIDX-LABEL: dyn_extract_v7f32_s_s:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_mov_b32 s0, s2
+; GPRIDX-NEXT:    s_mov_b32 m0, s9
+; GPRIDX-NEXT:    s_mov_b32 s1, s3
+; GPRIDX-NEXT:    s_mov_b32 s2, s4
+; GPRIDX-NEXT:    s_mov_b32 s3, s5
+; GPRIDX-NEXT:    s_mov_b32 s4, s6
+; GPRIDX-NEXT:    s_mov_b32 s5, s7
+; GPRIDX-NEXT:    s_mov_b32 s6, s8
+; GPRIDX-NEXT:    s_movrels_b32 s0, s0
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, s0
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v7f32_s_s:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 s0, s2
+; MOVREL-NEXT:    s_mov_b32 m0, s9
+; MOVREL-NEXT:    s_mov_b32 s1, s3
+; MOVREL-NEXT:    s_mov_b32 s2, s4
+; MOVREL-NEXT:    s_mov_b32 s3, s5
+; MOVREL-NEXT:    s_mov_b32 s4, s6
+; MOVREL-NEXT:    s_mov_b32 s5, s7
+; MOVREL-NEXT:    s_mov_b32 s6, s8
+; MOVREL-NEXT:    s_movrels_b32 s0, s0
+; MOVREL-NEXT:    v_mov_b32_e32 v0, s0
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <7 x float> %vec, i32 %sel
+  ret float %ext
+}
+
+define amdgpu_ps double @dyn_extract_v6f64_s_v(<6 x double> inreg %vec, i32 %sel) {
+; GPRIDX-LABEL: dyn_extract_v6f64_s_v:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_mov_b32 s16, s2
+; GPRIDX-NEXT:    s_mov_b32 s17, s3
+; GPRIDX-NEXT:    s_mov_b32 s18, s4
+; GPRIDX-NEXT:    s_mov_b32 s19, s5
+; GPRIDX-NEXT:    s_mov_b32 s20, s6
+; GPRIDX-NEXT:    s_mov_b32 s21, s7
+; GPRIDX-NEXT:    s_mov_b32 s22, s8
+; GPRIDX-NEXT:    s_mov_b32 s23, s9
+; GPRIDX-NEXT:    s_mov_b32 s24, s10
+; GPRIDX-NEXT:    s_mov_b32 s25, s11
+; GPRIDX-NEXT:    s_mov_b32 s26, s12
+; GPRIDX-NEXT:    s_mov_b32 s27, s13
+; GPRIDX-NEXT:    s_mov_b64 s[2:3], exec
+; GPRIDX-NEXT:  BB41_1: ; =>This Inner Loop Header: Depth=1
+; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v0
+; GPRIDX-NEXT:    s_lshl_b32 m0, s0, 1
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; GPRIDX-NEXT:    s_movrels_b32 s0, s16
+; GPRIDX-NEXT:    s_movrels_b32 s1, s17
+; GPRIDX-NEXT:    s_and_saveexec_b64 vcc, vcc
+; GPRIDX-NEXT:    s_xor_b64 exec, exec, vcc
+; GPRIDX-NEXT:    s_cbranch_execnz BB41_1
+; GPRIDX-NEXT:  ; %bb.2:
+; GPRIDX-NEXT:    s_mov_b64 exec, s[2:3]
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v6f64_s_v:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 s16, s2
+; MOVREL-NEXT:    s_mov_b32 s17, s3
+; MOVREL-NEXT:    s_mov_b32 s18, s4
+; MOVREL-NEXT:    s_mov_b32 s19, s5
+; MOVREL-NEXT:    s_mov_b32 s20, s6
+; MOVREL-NEXT:    s_mov_b32 s21, s7
+; MOVREL-NEXT:    s_mov_b32 s22, s8
+; MOVREL-NEXT:    s_mov_b32 s23, s9
+; MOVREL-NEXT:    s_mov_b32 s24, s10
+; MOVREL-NEXT:    s_mov_b32 s25, s11
+; MOVREL-NEXT:    s_mov_b32 s26, s12
+; MOVREL-NEXT:    s_mov_b32 s27, s13
+; MOVREL-NEXT:    s_mov_b64 s[2:3], exec
+; MOVREL-NEXT:  BB41_1: ; =>This Inner Loop Header: Depth=1
+; MOVREL-NEXT:    v_readfirstlane_b32 s0, v0
+; MOVREL-NEXT:    s_lshl_b32 m0, s0, 1
+; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; MOVREL-NEXT:    s_movrels_b32 s0, s16
+; MOVREL-NEXT:    s_movrels_b32 s1, s17
+; MOVREL-NEXT:    s_and_saveexec_b64 vcc, vcc
+; MOVREL-NEXT:    s_xor_b64 exec, exec, vcc
+; MOVREL-NEXT:    s_cbranch_execnz BB41_1
+; MOVREL-NEXT:  ; %bb.2:
+; MOVREL-NEXT:    s_mov_b64 exec, s[2:3]
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <6 x double> %vec, i32 %sel
+  ret double %ext
+}
+
+define double @dyn_extract_v6f64_v_v(<6 x double> %vec, i32 %sel) {
+; GPRIDX-LABEL: dyn_extract_v6f64_v_v:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GPRIDX-NEXT:    s_mov_b64 s[4:5], exec
+; GPRIDX-NEXT:  BB42_1: ; =>This Inner Loop Header: Depth=1
+; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v12
+; GPRIDX-NEXT:    s_lshl_b32 s7, s6, 1
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v12
+; GPRIDX-NEXT:    s_set_gpr_idx_on s7, gpr_idx(SRC0)
+; GPRIDX-NEXT:    v_mov_b32_e32 v13, v0
+; GPRIDX-NEXT:    v_mov_b32_e32 v14, v1
+; GPRIDX-NEXT:    s_set_gpr_idx_off
+; GPRIDX-NEXT:    s_and_saveexec_b64 vcc, vcc
+; GPRIDX-NEXT:    s_xor_b64 exec, exec, vcc
+; GPRIDX-NEXT:    s_cbranch_execnz BB42_1
+; GPRIDX-NEXT:  ; %bb.2:
+; GPRIDX-NEXT:    s_mov_b64 exec, s[4:5]
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v13
+; GPRIDX-NEXT:    v_mov_b32_e32 v1, v14
+; GPRIDX-NEXT:    s_setpc_b64 s[30:31]
+;
+; MOVREL-LABEL: dyn_extract_v6f64_v_v:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; MOVREL-NEXT:    s_mov_b64 s[4:5], exec
+; MOVREL-NEXT:  BB42_1: ; =>This Inner Loop Header: Depth=1
+; MOVREL-NEXT:    v_readfirstlane_b32 s6, v12
+; MOVREL-NEXT:    s_lshl_b32 m0, s6, 1
+; MOVREL-NEXT:    v_movrels_b32_e32 v13, v0
+; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v12
+; MOVREL-NEXT:    v_movrels_b32_e32 v14, v1
+; MOVREL-NEXT:    s_and_saveexec_b64 vcc, vcc
+; MOVREL-NEXT:    s_xor_b64 exec, exec, vcc
+; MOVREL-NEXT:    s_cbranch_execnz BB42_1
+; MOVREL-NEXT:  ; %bb.2:
+; MOVREL-NEXT:    s_mov_b64 exec, s[4:5]
+; MOVREL-NEXT:    v_mov_b32_e32 v0, v13
+; MOVREL-NEXT:    v_mov_b32_e32 v1, v14
+; MOVREL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %ext = extractelement <6 x double> %vec, i32 %sel
+  ret double %ext
+}
+
+define amdgpu_ps double @dyn_extract_v6f64_v_s(<6 x double> %vec, i32 inreg %sel) {
+; GPRIDX-LABEL: dyn_extract_v6f64_v_s:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_lshl_b32 s0, s2, 1
+; GPRIDX-NEXT:    s_set_gpr_idx_on s0, gpr_idx(SRC0)
+; GPRIDX-NEXT:    v_mov_b32_e32 v12, v0
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v1
+; GPRIDX-NEXT:    s_set_gpr_idx_off
+; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v12
+; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v0
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v6f64_v_s:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_lshl_b32 m0, s2, 1
+; MOVREL-NEXT:    v_movrels_b32_e32 v12, v0
+; MOVREL-NEXT:    v_movrels_b32_e32 v0, v1
+; MOVREL-NEXT:    v_readfirstlane_b32 s0, v12
+; MOVREL-NEXT:    v_readfirstlane_b32 s1, v0
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <6 x double> %vec, i32 %sel
+  ret double %ext
+}
+
+define amdgpu_ps double @dyn_extract_v6f64_s_s(<6 x double> inreg %vec, i32 inreg %sel) {
+; GPRIDX-LABEL: dyn_extract_v6f64_s_s:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_mov_b32 s0, s2
+; GPRIDX-NEXT:    s_mov_b32 s1, s3
+; GPRIDX-NEXT:    s_mov_b32 m0, s14
+; GPRIDX-NEXT:    s_mov_b32 s2, s4
+; GPRIDX-NEXT:    s_mov_b32 s3, s5
+; GPRIDX-NEXT:    s_mov_b32 s4, s6
+; GPRIDX-NEXT:    s_mov_b32 s5, s7
+; GPRIDX-NEXT:    s_mov_b32 s6, s8
+; GPRIDX-NEXT:    s_mov_b32 s7, s9
+; GPRIDX-NEXT:    s_mov_b32 s8, s10
+; GPRIDX-NEXT:    s_mov_b32 s9, s11
+; GPRIDX-NEXT:    s_mov_b32 s10, s12
+; GPRIDX-NEXT:    s_mov_b32 s11, s13
+; GPRIDX-NEXT:    s_movrels_b64 s[0:1], s[0:1]
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v6f64_s_s:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 s0, s2
+; MOVREL-NEXT:    s_mov_b32 s1, s3
+; MOVREL-NEXT:    s_mov_b32 m0, s14
+; MOVREL-NEXT:    s_mov_b32 s2, s4
+; MOVREL-NEXT:    s_mov_b32 s3, s5
+; MOVREL-NEXT:    s_mov_b32 s4, s6
+; MOVREL-NEXT:    s_mov_b32 s5, s7
+; MOVREL-NEXT:    s_mov_b32 s6, s8
+; MOVREL-NEXT:    s_mov_b32 s7, s9
+; MOVREL-NEXT:    s_mov_b32 s8, s10
+; MOVREL-NEXT:    s_mov_b32 s9, s11
+; MOVREL-NEXT:    s_mov_b32 s10, s12
+; MOVREL-NEXT:    s_mov_b32 s11, s13
+; MOVREL-NEXT:    s_movrels_b64 s[0:1], s[0:1]
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <6 x double> %vec, i32 %sel
+  ret double %ext
+}
+
+define amdgpu_ps double @dyn_extract_v7f64_s_v(<7 x double> inreg %vec, i32 %sel) {
+; GPRIDX-LABEL: dyn_extract_v7f64_s_v:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_mov_b32 s16, s2
+; GPRIDX-NEXT:    s_mov_b32 s17, s3
+; GPRIDX-NEXT:    s_mov_b32 s18, s4
+; GPRIDX-NEXT:    s_mov_b32 s19, s5
+; GPRIDX-NEXT:    s_mov_b32 s20, s6
+; GPRIDX-NEXT:    s_mov_b32 s21, s7
+; GPRIDX-NEXT:    s_mov_b32 s22, s8
+; GPRIDX-NEXT:    s_mov_b32 s23, s9
+; GPRIDX-NEXT:    s_mov_b32 s24, s10
+; GPRIDX-NEXT:    s_mov_b32 s25, s11
+; GPRIDX-NEXT:    s_mov_b32 s26, s12
+; GPRIDX-NEXT:    s_mov_b32 s27, s13
+; GPRIDX-NEXT:    s_mov_b32 s28, s14
+; GPRIDX-NEXT:    s_mov_b32 s29, s15
+; GPRIDX-NEXT:    s_mov_b64 s[2:3], exec
+; GPRIDX-NEXT:  BB45_1: ; =>This Inner Loop Header: Depth=1
+; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v0
+; GPRIDX-NEXT:    s_lshl_b32 m0, s0, 1
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; GPRIDX-NEXT:    s_movrels_b32 s0, s16
+; GPRIDX-NEXT:    s_movrels_b32 s1, s17
+; GPRIDX-NEXT:    s_and_saveexec_b64 vcc, vcc
+; GPRIDX-NEXT:    s_xor_b64 exec, exec, vcc
+; GPRIDX-NEXT:    s_cbranch_execnz BB45_1
+; GPRIDX-NEXT:  ; %bb.2:
+; GPRIDX-NEXT:    s_mov_b64 exec, s[2:3]
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v7f64_s_v:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 s16, s2
+; MOVREL-NEXT:    s_mov_b32 s17, s3
+; MOVREL-NEXT:    s_mov_b32 s18, s4
+; MOVREL-NEXT:    s_mov_b32 s19, s5
+; MOVREL-NEXT:    s_mov_b32 s20, s6
+; MOVREL-NEXT:    s_mov_b32 s21, s7
+; MOVREL-NEXT:    s_mov_b32 s22, s8
+; MOVREL-NEXT:    s_mov_b32 s23, s9
+; MOVREL-NEXT:    s_mov_b32 s24, s10
+; MOVREL-NEXT:    s_mov_b32 s25, s11
+; MOVREL-NEXT:    s_mov_b32 s26, s12
+; MOVREL-NEXT:    s_mov_b32 s27, s13
+; MOVREL-NEXT:    s_mov_b32 s28, s14
+; MOVREL-NEXT:    s_mov_b32 s29, s15
+; MOVREL-NEXT:    s_mov_b64 s[2:3], exec
+; MOVREL-NEXT:  BB45_1: ; =>This Inner Loop Header: Depth=1
+; MOVREL-NEXT:    v_readfirstlane_b32 s0, v0
+; MOVREL-NEXT:    s_lshl_b32 m0, s0, 1
+; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; MOVREL-NEXT:    s_movrels_b32 s0, s16
+; MOVREL-NEXT:    s_movrels_b32 s1, s17
+; MOVREL-NEXT:    s_and_saveexec_b64 vcc, vcc
+; MOVREL-NEXT:    s_xor_b64 exec, exec, vcc
+; MOVREL-NEXT:    s_cbranch_execnz BB45_1
+; MOVREL-NEXT:  ; %bb.2:
+; MOVREL-NEXT:    s_mov_b64 exec, s[2:3]
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <7 x double> %vec, i32 %sel
+  ret double %ext
+}
+
+define double @dyn_extract_v7f64_v_v(<7 x double> %vec, i32 %sel) {
+; GPRIDX-LABEL: dyn_extract_v7f64_v_v:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GPRIDX-NEXT:    s_mov_b64 s[4:5], exec
+; GPRIDX-NEXT:  BB46_1: ; =>This Inner Loop Header: Depth=1
+; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v14
+; GPRIDX-NEXT:    s_lshl_b32 s7, s6, 1
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v14
+; GPRIDX-NEXT:    s_set_gpr_idx_on s7, gpr_idx(SRC0)
+; GPRIDX-NEXT:    v_mov_b32_e32 v15, v0
+; GPRIDX-NEXT:    v_mov_b32_e32 v16, v1
+; GPRIDX-NEXT:    s_set_gpr_idx_off
+; GPRIDX-NEXT:    s_and_saveexec_b64 vcc, vcc
+; GPRIDX-NEXT:    s_xor_b64 exec, exec, vcc
+; GPRIDX-NEXT:    s_cbranch_execnz BB46_1
+; GPRIDX-NEXT:  ; %bb.2:
+; GPRIDX-NEXT:    s_mov_b64 exec, s[4:5]
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v15
+; GPRIDX-NEXT:    v_mov_b32_e32 v1, v16
+; GPRIDX-NEXT:    s_setpc_b64 s[30:31]
+;
+; MOVREL-LABEL: dyn_extract_v7f64_v_v:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; MOVREL-NEXT:    s_mov_b64 s[4:5], exec
+; MOVREL-NEXT:  BB46_1: ; =>This Inner Loop Header: Depth=1
+; MOVREL-NEXT:    v_readfirstlane_b32 s6, v14
+; MOVREL-NEXT:    s_lshl_b32 m0, s6, 1
+; MOVREL-NEXT:    v_movrels_b32_e32 v15, v0
+; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v14
+; MOVREL-NEXT:    v_movrels_b32_e32 v16, v1
+; MOVREL-NEXT:    s_and_saveexec_b64 vcc, vcc
+; MOVREL-NEXT:    s_xor_b64 exec, exec, vcc
+; MOVREL-NEXT:    s_cbranch_execnz BB46_1
+; MOVREL-NEXT:  ; %bb.2:
+; MOVREL-NEXT:    s_mov_b64 exec, s[4:5]
+; MOVREL-NEXT:    v_mov_b32_e32 v0, v15
+; MOVREL-NEXT:    v_mov_b32_e32 v1, v16
+; MOVREL-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %ext = extractelement <7 x double> %vec, i32 %sel
+  ret double %ext
+}
+
+define amdgpu_ps double @dyn_extract_v7f64_v_s(<7 x double> %vec, i32 inreg %sel) {
+; GPRIDX-LABEL: dyn_extract_v7f64_v_s:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_lshl_b32 s0, s2, 1
+; GPRIDX-NEXT:    s_set_gpr_idx_on s0, gpr_idx(SRC0)
+; GPRIDX-NEXT:    v_mov_b32_e32 v14, v0
+; GPRIDX-NEXT:    v_mov_b32_e32 v0, v1
+; GPRIDX-NEXT:    s_set_gpr_idx_off
+; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v14
+; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v0
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v7f64_v_s:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_lshl_b32 m0, s2, 1
+; MOVREL-NEXT:    v_movrels_b32_e32 v14, v0
+; MOVREL-NEXT:    v_movrels_b32_e32 v0, v1
+; MOVREL-NEXT:    v_readfirstlane_b32 s0, v14
+; MOVREL-NEXT:    v_readfirstlane_b32 s1, v0
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <7 x double> %vec, i32 %sel
+  ret double %ext
+}
+
+define amdgpu_ps double @dyn_extract_v7f64_s_s(<7 x double> inreg %vec, i32 inreg %sel) {
+; GPRIDX-LABEL: dyn_extract_v7f64_s_s:
+; GPRIDX:       ; %bb.0: ; %entry
+; GPRIDX-NEXT:    s_mov_b32 s0, s2
+; GPRIDX-NEXT:    s_mov_b32 s1, s3
+; GPRIDX-NEXT:    s_mov_b32 m0, s16
+; GPRIDX-NEXT:    s_mov_b32 s2, s4
+; GPRIDX-NEXT:    s_mov_b32 s3, s5
+; GPRIDX-NEXT:    s_mov_b32 s4, s6
+; GPRIDX-NEXT:    s_mov_b32 s5, s7
+; GPRIDX-NEXT:    s_mov_b32 s6, s8
+; GPRIDX-NEXT:    s_mov_b32 s7, s9
+; GPRIDX-NEXT:    s_mov_b32 s8, s10
+; GPRIDX-NEXT:    s_mov_b32 s9, s11
+; GPRIDX-NEXT:    s_mov_b32 s10, s12
+; GPRIDX-NEXT:    s_mov_b32 s11, s13
+; GPRIDX-NEXT:    s_mov_b32 s12, s14
+; GPRIDX-NEXT:    s_mov_b32 s13, s15
+; GPRIDX-NEXT:    s_movrels_b64 s[0:1], s[0:1]
+; GPRIDX-NEXT:    ; return to shader part epilog
+;
+; MOVREL-LABEL: dyn_extract_v7f64_s_s:
+; MOVREL:       ; %bb.0: ; %entry
+; MOVREL-NEXT:    s_mov_b32 s0, s2
+; MOVREL-NEXT:    s_mov_b32 s1, s3
+; MOVREL-NEXT:    s_mov_b32 m0, s16
+; MOVREL-NEXT:    s_mov_b32 s2, s4
+; MOVREL-NEXT:    s_mov_b32 s3, s5
+; MOVREL-NEXT:    s_mov_b32 s4, s6
+; MOVREL-NEXT:    s_mov_b32 s5, s7
+; MOVREL-NEXT:    s_mov_b32 s6, s8
+; MOVREL-NEXT:    s_mov_b32 s7, s9
+; MOVREL-NEXT:    s_mov_b32 s8, s10
+; MOVREL-NEXT:    s_mov_b32 s9, s11
+; MOVREL-NEXT:    s_mov_b32 s10, s12
+; MOVREL-NEXT:    s_mov_b32 s11, s13
+; MOVREL-NEXT:    s_mov_b32 s12, s14
+; MOVREL-NEXT:    s_mov_b32 s13, s15
+; MOVREL-NEXT:    s_movrels_b64 s[0:1], s[0:1]
+; MOVREL-NEXT:    ; return to shader part epilog
+entry:
+  %ext = extractelement <7 x double> %vec, i32 %sel
+  ret double %ext
+}

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
index d8f2fad8f938..54cb2a0ab0e0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
@@ -1,10 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=instruction-select -global-isel-abort=2 -pass-remarks-missed='gisel*'  -o - %s 2> %t | FileCheck -check-prefix=GCN  %s
-# RUN: FileCheck -check-prefix=ERR %s < %t
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=instruction-select -global-isel-abort=2 -pass-remarks-missed='gisel*'  -o - %s | FileCheck -check-prefix=GCN  %s
 
-# ERR-NOT: remark:
-# ERR: remark: <unknown>:0:0: cannot select: %2:sgpr(<6 x s64>) = G_CONCAT_VECTORS %0:sgpr(<3 x s64>), %1:sgpr(<3 x s64>) (in function: test_concat_vectors_s_v6s64_s_v3s64_s_v3s64)
-# ERR-NOT: remark:
 
 ---
 name: test_concat_vectors_v_v4s16_v_v2s16_v_v2s16
@@ -634,10 +630,10 @@ body: |
     liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
 
     ; GCN-LABEL: name: test_concat_vectors_s_v6s64_s_v3s64_s_v3s64
-    ; GCN: [[DEF:%[0-9]+]]:sgpr(<3 x s64>) = G_IMPLICIT_DEF
-    ; GCN: [[DEF1:%[0-9]+]]:sgpr(<3 x s64>) = G_IMPLICIT_DEF
-    ; GCN: [[CONCAT_VECTORS:%[0-9]+]]:sgpr(<6 x s64>) = G_CONCAT_VECTORS [[DEF]](<3 x s64>), [[DEF1]](<3 x s64>)
-    ; GCN: S_ENDPGM 0, implicit [[CONCAT_VECTORS]](<6 x s64>)
+    ; GCN: [[DEF:%[0-9]+]]:sgpr_192 = IMPLICIT_DEF
+    ; GCN: [[DEF1:%[0-9]+]]:sgpr_192 = IMPLICIT_DEF
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_512 = REG_SEQUENCE [[DEF]], %subreg.sub0_sub1_sub2_sub3_sub4_sub5, [[DEF1]], %subreg.sub6_sub7_sub8_sub9_sub10_sub11
+    ; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
     %0:sgpr(<3 x s64>) = G_IMPLICIT_DEF
     %1:sgpr(<3 x s64>) = G_IMPLICIT_DEF
     %2:sgpr(<6 x s64>) = G_CONCAT_VECTORS %0, %1


        

