[llvm] 4eecf17 - [AMDGPU] Always expand ext/insertelement with divergent idx

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Wed May 20 15:51:41 PDT 2020


Author: Stanislav Mekhanoshin
Date: 2020-05-20T15:51:29-07:00
New Revision: 4eecf171645e064ff251ea3f706d3252b54da2f8

URL: https://github.com/llvm/llvm-project/commit/4eecf171645e064ff251ea3f706d3252b54da2f8
DIFF: https://github.com/llvm/llvm-project/commit/4eecf171645e064ff251ea3f706d3252b54da2f8.diff

LOG: [AMDGPU] Always expand ext/insertelement with divergent idx

Even though a series of cmp/cndmask instructions can produce quite a lot of
code, that is still better than a loop. In the case of doubles we
would even produce two loops.

Differential Revision: https://reviews.llvm.org/D80032

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
    llvm/test/CodeGen/AMDGPU/indirect-addressing-si-gfx9.ll
    llvm/test/CodeGen/AMDGPU/indirect-addressing-si-pregfx9.ll
    llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
    llvm/test/CodeGen/AMDGPU/scratch-simple.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 1b8ca7b51450..492b4f72a055 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -99,6 +99,12 @@ static cl::opt<bool> VGPRReserveforSGPRSpill(
     "amdgpu-reserve-vgpr-for-sgpr-spill",
     cl::desc("Allocates one VGPR for future SGPR Spill"), cl::init(true));
 
+static cl::opt<bool> UseDivergentRegisterIndexing(
+  "amdgpu-use-divergent-register-indexing",
+  cl::Hidden,
+  cl::desc("Use indirect register addressing for divergent indexes"),
+  cl::init(false));
+
 static bool hasFP32Denormals(const MachineFunction &MF) {
   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
   return Info->getMode().allFP32Denormals();
@@ -9533,7 +9539,10 @@ SDValue SITargetLowering::performExtractVectorEltCombine(
   // Sub-dword vectors of size 2 dword or less have better implementation.
   // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
   // instructions.
-  if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
+  // Always do this if var-idx is divergent, otherwise it will become a loop.
+  if (!UseDivergentRegisterIndexing &&
+      (VecSize <= 256 || N->getOperand(1)->isDivergent()) &&
+      (VecSize > 64 || EltSize >= 32) &&
       !isa<ConstantSDNode>(N->getOperand(1))) {
     SDLoc SL(N);
     SDValue Idx = N->getOperand(1);
@@ -9603,8 +9612,10 @@ SITargetLowering::performInsertVectorEltCombine(SDNode *N,
   // Sub-dword vectors of size 2 dword or less have better implementation.
   // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
   // instructions.
-  if (isa<ConstantSDNode>(Idx) ||
-      VecSize > 256 || (VecSize <= 64 && EltSize < 32))
+  // Always do this if var-idx is divergent, otherwise it will become a loop.
+  if (UseDivergentRegisterIndexing || isa<ConstantSDNode>(Idx) ||
+      (VecSize > 256 && !Idx->isDivergent()) ||
+      (VecSize <= 64 && EltSize < 32))
     return SDValue();
 
   SelectionDAG &DAG = DCI.DAG;

diff  --git a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
index e06d2b24d01e..59a913657e46 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
@@ -384,3 +384,31 @@ entry:
   store i32 %zext, i32 addrspace(1)* %out
   ret void
 }
+
+; GCN-LABEL: {{^}}float32_extelt_vec:
+; GCN-NOT: buffer_
+; GCN-DAG: v_cmp_eq_u32_e{{32|64}} [[CC1:[^,]+]], 1, v0
+; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 1.0, 2.0, [[CC1]]
+; GCN-DAG: v_mov_b32_e32 [[LASTVAL:v[0-9]+]], 0x42000000
+; GCN-DAG: v_cmp_ne_u32_e32 [[LASTCC:[^,]+]], 31, v0
+; GCN-DAG: v_cndmask_b32_e{{32|64}} v0, [[LASTVAL]], v{{[0-9]+}}, [[LASTCC]]
+define float @float32_extelt_vec(i32 %sel) {
+entry:
+  %ext = extractelement <32 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0, float 17.0, float 18.0, float 19.0, float 20.0, float 21.0, float 22.0, float 23.0, float 24.0, float 25.0, float 26.0, float 27.0, float 28.0, float 29.0, float 30.0, float 31.0, float 32.0>, i32 %sel
+  ret float %ext
+}
+
+; GCN-LABEL: {{^}}double16_extelt_vec:
+; GCN-NOT: buffer_
+; GCN-DAG: v_mov_b32_e32 [[V1HI:v[0-9]+]], 0x3ff19999
+; GCN-DAG: v_mov_b32_e32 [[V1LO:v[0-9]+]], 0x9999999a
+; GCN-DAG: v_mov_b32_e32 [[V2HI:v[0-9]+]], 0x4000cccc
+; GCN-DAG: v_mov_b32_e32 [[V2LO:v[0-9]+]], 0xcccccccd
+; GCN-DAG: v_cmp_eq_u32_e{{32|64}} [[CC1:[^,]+]], 1, v0
+; GCN-DAG: v_cndmask_b32_e{{32|64}} [[R1HI:v[0-9]+]], [[V1HI]], [[V2HI]], [[CC1]]
+; GCN-DAG: v_cndmask_b32_e{{32|64}} [[R1LO:v[0-9]+]], [[V1LO]], [[V2LO]], [[CC1]]
+define double @double16_extelt_vec(i32 %sel) {
+entry:
+  %ext = extractelement <16 x double> <double 1.1, double 2.1, double 3.1, double 4.1, double 5.1, double 6.1, double 7.1, double 8.1, double 9.1, double 10.1, double 11.1, double 12.1, double 13.1, double 14.1, double 15.1, double 16.1>, i32 %sel
+  ret double %ext
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si-gfx9.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si-gfx9.ll
index e70c178250ef..6dd763cda54c 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si-gfx9.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si-gfx9.ll
@@ -14,46 +14,10 @@
 ; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT15:[0-9]+]], s[[S_ELT15]]
 ; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]
 
-; GCN-DAG: v_add_u32_e32 [[IDX1:v[0-9]+]], 1, [[IDX0]]
+; GCN: v_cmp_eq_u32_e32
+; GCN-COUNT-32: v_cndmask_b32
 
-; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
-; GCN: s_and_saveexec_b64 vcc, vcc
-
-; MOVREL: s_mov_b32 m0, [[READLANE]]
-; MOVREL-NEXT: v_movreld_b32_e32 v[[VEC_ELT0]], [[INS0]]
-
-; IDXMODE: s_set_gpr_idx_on [[READLANE]], gpr_idx(DST)
-; IDXMODE-NEXT: v_mov_b32_e32 v[[VEC_ELT0]], [[INS0]]
-; IDXMODE: s_set_gpr_idx_off
-
-; GCN-NEXT: s_xor_b64 exec, exec, vcc
-; GCN: s_cbranch_execnz [[LOOP0]]
-
-; FIXME: Redundant copy
-; GCN: s_mov_b64 exec, [[MASK:s\[[0-9]+:[0-9]+\]]]
-
-; GCN: s_mov_b64 [[MASK]], exec
-
-; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX1]]
-; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX1]]
-; GCN: s_and_saveexec_b64 vcc, vcc
-
-; MOVREL: s_mov_b32 m0, [[READLANE]]
-; MOVREL-NEXT: v_movreld_b32_e32 v{{[0-9]+}}, 63
-
-; IDXMODE: s_set_gpr_idx_on [[READLANE]], gpr_idx(DST)
-; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 63
-; IDXMODE: s_set_gpr_idx_off
-
-; GCN-NEXT: s_xor_b64 exec, exec, vcc
-; GCN: s_cbranch_execnz [[LOOP1]]
-
-; GCN: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:
-
-; GCN: buffer_store_dword [[INS0]]
+; GCN-COUNT-4: buffer_store_dwordx4
 define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(<16 x i32> addrspace(1)* %out0, <16 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <16 x i32> %vec0) #0 {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1

diff  --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si-pregfx9.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si-pregfx9.ll
index 72f9d457b6f6..3f4761943d4d 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si-pregfx9.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si-pregfx9.ll
@@ -19,44 +19,10 @@
 
 ; GCN-DAG: v_add_{{i32|u32}}_e32 [[IDX1:v[0-9]+]], vcc, 1, [[IDX0]]
 
-; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
-; GCN: s_and_saveexec_b64 vcc, vcc
+; GCN: v_cmp_eq_u32_e32
+; GCN-COUNT-32: v_cndmask_b32
 
-; MOVREL: s_mov_b32 m0, [[READLANE]]
-; MOVREL-NEXT: v_movreld_b32_e32 v[[VEC_ELT0]], [[INS0]]
-
-; IDXMODE: s_set_gpr_idx_on [[READLANE]], gpr_idx(DST)
-; IDXMODE-NEXT: v_mov_b32_e32 v[[VEC_ELT0]], [[INS0]]
-; IDXMODE: s_set_gpr_idx_off
-
-; GCN-NEXT: s_xor_b64 exec, exec, vcc
-; GCN: s_cbranch_execnz [[LOOP0]]
-
-; FIXME: Redundant copy
-; GCN: s_mov_b64 exec, [[MASK:s\[[0-9]+:[0-9]+\]]]
-
-; GCN: s_mov_b64 [[MASK]], exec
-
-; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX1]]
-; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX1]]
-; GCN: s_and_saveexec_b64 vcc, vcc
-
-; MOVREL: s_mov_b32 m0, [[READLANE]]
-; MOVREL-NEXT: v_movreld_b32_e32 v{{[0-9]+}}, 63
-
-; IDXMODE: s_set_gpr_idx_on [[READLANE]], gpr_idx(DST)
-; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 63
-; IDXMODE: s_set_gpr_idx_off
-
-; GCN-NEXT: s_xor_b64 exec, exec, vcc
-; GCN: s_cbranch_execnz [[LOOP1]]
-
-; GCN: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:
-
-; GCN: buffer_store_dword [[INS0]]
+; GCN-COUNT-4: buffer_store_dwordx4
 define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(<16 x i32> addrspace(1)* %out0, <16 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <16 x i32> %vec0) #0 {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1

diff  --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 2e3b247da4fc..b108e2637536 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -131,22 +131,9 @@ entry:
 ; GCN-LABEL: {{^}}extract_neg_offset_vgpr:
 ; The offset depends on the register that holds the first element of the vector.
 
-; FIXME: The waitcnt for the argument load can go after the loop
-; GCN: s_mov_b64 s{{\[[0-9]+:[0-9]+\]}}, exec
-; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]:
-; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]], v{{[0-9]+}}
-; GCN: s_and_saveexec_b64 vcc, vcc
-
-; MOVREL: s_add_i32 m0, [[READLANE]], 0xfffffe0
-; MOVREL: v_movrels_b32_e32 [[RESULT:v[0-9]+]], v1
-
-; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00
-; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], gpr_idx(SRC0)
-; IDXMODE: v_mov_b32_e32 [[RESULT:v[0-9]+]], v1
-; IDXMODE: s_set_gpr_idx_off
-
-; GCN: s_cbranch_execnz
-
+; GCN: v_cmp_eq_u32_e32
+; GCN-COUNT-14: v_cndmask_b32
+; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], 16
 ; GCN: buffer_store_dword [[RESULT]]
 define amdgpu_kernel void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
 entry:
@@ -301,40 +288,9 @@ entry:
 ; GCN-LABEL: {{^}}insert_neg_offset_vgpr:
 ; The offset depends on the register that holds the first element of the vector.
 
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 5{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 6{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 7{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 8{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 9{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 10{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 11{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 12{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 13{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 14{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 15{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 16{{$}}
-
-; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
-; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]:
-; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
-; GCN: s_and_saveexec_b64 vcc, vcc
-
-; MOVREL: s_add_i32 m0, [[READLANE]], 0xfffffe00
-; MOVREL: v_movreld_b32_e32 [[VEC_ELT0]], 33
-
-; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
-; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], gpr_idx(DST)
-; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, 33
-; IDXMODE: s_set_gpr_idx_off
-
-; GCN: s_cbranch_execnz [[LOOPBB]]
-; GCN: s_mov_b64 exec, [[SAVEEXEC]]
-
-; GCN: buffer_store_dword
+; GCN: v_cmp_eq_u32_e32
+; GCN-COUNT-16: v_cndmask_b32
+; GCN-COUNT-4:  buffer_store_dwordx4
 define amdgpu_kernel void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <16 x i32> addrspace(1)* %out) {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -346,38 +302,9 @@ entry:
 
 ; GCN-LABEL: {{^}}insert_neg_inline_offset_vgpr:
 
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 5{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 6{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 7{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 8{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 9{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 10{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 11{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 12{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 13{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 14{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 15{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 16{{$}}
-; GCN-DAG: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x1f4{{$}}
-
-; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
-
-; The offset depends on the register that holds the first element of the vector.
-; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
-
-; MOVREL: s_add_i32 m0, [[READLANE]], -16
-; MOVREL: v_movreld_b32_e32 [[VEC_ELT0]], [[VAL]]
-
-; IDXMODE: s_add_i32 [[ADD_IDX:s[0-9]+]], [[READLANE]], -16
-; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], gpr_idx(DST)
-; IDXMODE: v_mov_b32_e32 [[VEC_ELT0]], [[VAL]]
-; IDXMODE: s_set_gpr_idx_off
-
-; GCN: s_cbranch_execnz
+; GCN: v_cmp_eq_u32_e32
+; GCN-COUNT-16: v_cndmask_b32
+; GCN-COUNT-4:  buffer_store_dwordx4
 define amdgpu_kernel void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <16 x i32> addrspace(1)* %out) {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -392,60 +319,13 @@ entry:
 
 ; GCN-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:
 
-; FIXME: Why is vector copied in between?
-
 ; GCN-DAG: {{buffer|flat|global}}_load_dword [[IDX0:v[0-9]+]]
-; GCN-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
-; GCN-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
-; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], [[S_ELT1]]
-
-; GCN: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec
-
-; GCN: s_waitcnt vmcnt(0)
-; PREGFX9: v_add_{{i32|u32}}_e32 [[IDX1:v[0-9]+]], vcc, 1, [[IDX0]]
-; GFX9: v_add_{{i32|u32}}_e32 [[IDX1:v[0-9]+]], 1, [[IDX0]]
-
-
-; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
-; GCN: s_and_saveexec_b64 vcc, vcc
-
-; MOVREL: s_mov_b32 m0, [[READLANE]]
-; MOVREL: v_movrels_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]
-
-; IDXMODE: s_set_gpr_idx_on [[READLANE]], gpr_idx(SRC0)
-; IDXMODE: v_mov_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]
-; IDXMODE: s_set_gpr_idx_off
-
-; GCN-NEXT: s_xor_b64 exec, exec, vcc
-; GCN-NEXT: s_cbranch_execnz [[LOOP0]]
-
-; FIXME: Redundant copy
-; GCN: s_mov_b64 exec, [[MASK]]
-
-; GCN: v_mov_b32_e32 [[VEC_ELT0_2:v[0-9]+]], [[S_ELT0]]
-
-; GCN: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec
-
-; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
-; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX1]]
-; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX1]]
-; GCN: s_and_saveexec_b64 vcc, vcc
-
-; MOVREL: s_mov_b32 m0, [[READLANE]]
-; MOVREL-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT0_2]]
-
-; IDXMODE: s_set_gpr_idx_on [[READLANE]], gpr_idx(SRC0)
-; IDXMODE-NEXT: v_mov_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT0_2]]
-; IDXMODE: s_set_gpr_idx_off
-
-; GCN-NEXT: s_xor_b64 exec, exec, vcc
-; GCN: s_cbranch_execnz [[LOOP1]]
+; GCN: v_cmp_eq_u32
+; GCN: v_cndmask_b32_e64 [[RESULT0:v[0-9]+]], 16,
+; GCN: v_cndmask_b32_e64 [[RESULT1:v[0-9]+]], 16,
 
-; GCN: buffer_store_dword [[MOVREL0]]
-; GCN: buffer_store_dword [[MOVREL1]]
+; GCN: buffer_store_dword [[RESULT0]]
+; GCN: buffer_store_dword [[RESULT1]]
 define amdgpu_kernel void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1

diff  --git a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
index 0c8e6248a7d2..876542c19fe8 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
@@ -363,3 +363,37 @@ entry:
   store <128 x i1> %v, <128 x i1> addrspace(1)* %out
   ret void
 }
+
+; GCN-LABEL: {{^}}float32_inselt_vec:
+; GCN-NOT: buffer_
+; GCN-COUNT-32: v_cmp_ne_u32
+; GCN-COUNT-32: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, 1.0,
+define amdgpu_ps <32 x float> @float32_inselt_vec(<32 x float> %vec, i32 %sel) {
+entry:
+  %v = insertelement <32 x float> %vec, float 1.000000e+00, i32 %sel
+  ret <32 x float> %v
+}
+
+; GCN-LABEL: {{^}}double8_inselt_vec:
+; GCN-NOT: buffer_
+; GCN:         v_cmp_eq_u32
+; GCN-COUNT-2: v_cndmask_b32
+; GCN:         v_cmp_eq_u32
+; GCN-COUNT-2: v_cndmask_b32
+; GCN:         v_cmp_eq_u32
+; GCN-COUNT-2: v_cndmask_b32
+; GCN:         v_cmp_eq_u32
+; GCN-COUNT-2: v_cndmask_b32
+; GCN:         v_cmp_eq_u32
+; GCN-COUNT-2: v_cndmask_b32
+; GCN:         v_cmp_eq_u32
+; GCN-COUNT-2: v_cndmask_b32
+; GCN:         v_cmp_eq_u32
+; GCN-COUNT-2: v_cndmask_b32
+; GCN:         v_cmp_eq_u32
+; GCN-COUNT-2: v_cndmask_b32
+define <8 x double> @double8_inselt_vec(<8 x double> %vec, i32 %sel) {
+entry:
+  %v = insertelement <8 x double> %vec, double 1.000000e+00, i32 %sel
+  ret <8 x double> %v
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/scratch-simple.ll b/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
index 591f7bfe5aed..0b2eb6a7ae17 100644
--- a/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
+++ b/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
@@ -1,9 +1,9 @@
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=verde -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,SI,SIVI %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx803 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,VI,SIVI %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9,GFX9_10 %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -filetype=obj < %s | llvm-readobj -r | FileCheck --check-prefix=RELS %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX10_W32,GFX9_10 %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx1010 -mattr=-flat-for-global,+wavefrontsize64 -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX10_W64,GFX9_10 %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=verde -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,SI,SIVI %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx803 -mattr=-flat-for-global -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,VI,SIVI %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -mattr=-flat-for-global -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9,GFX9_10 %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -filetype=obj -amdgpu-use-divergent-register-indexing < %s | llvm-readobj -r | FileCheck --check-prefix=RELS %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx1010 -mattr=-flat-for-global -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX10_W32,GFX9_10 %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx1010 -mattr=-flat-for-global,+wavefrontsize64 -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX10_W64,GFX9_10 %s
 
 ; RELS: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD0 0x0
 ; RELS: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD1 0x0


        


More information about the llvm-commits mailing list