[llvm] r292842 - DAG: Don't fold vector extract into load if target doesn't want to
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 23 14:48:54 PST 2017
Author: arsenm
Date: Mon Jan 23 16:48:53 2017
New Revision: 292842
URL: http://llvm.org/viewvc/llvm-project?rev=292842&view=rev
Log:
DAG: Don't fold vector extract into load if target doesn't want to
Fixes turning a 32-bit scalar load into an extending vector load
for AMDGPU when dynamically indexing a vector.
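
The guard added below reuses the existing TargetLowering::shouldReduceLoadWidth
hook (only DAGCombiner.cpp and the test change, so the hook itself is not new).
Judging from the call site in the diff, it takes the original load node, the
extension kind, and the narrowed value type, and its default treats narrowing
as profitable. Take the following as a paraphrased sketch, not the verbatim
TargetLowering.h declaration:

  // Paraphrased sketch of the hook consulted by the new check; parameter
  // types are inferred from the call below (OriginalLoad, ExtTy, VecEltVT).
  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    return true; // Default: narrowing the load is assumed to be worthwhile.
  }
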
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=292842&r1=292841&r2=292842&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Mon Jan 23 16:48:53 2017
@@ -12560,6 +12560,11 @@ SDValue DAGCombiner::ReplaceExtractVecto
   if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
     return SDValue();
 
+  ISD::LoadExtType ExtTy = ResultVT.bitsGT(VecEltVT) ?
+    ISD::NON_EXTLOAD : ISD::EXTLOAD;
+  if (!TLI.shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT))
+    return SDValue();
+
   Align = NewAlign;
 
   SDValue NewPtr = OriginalLoad->getBasePtr();
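
With this check in place, a target opts out of the fold by overriding
shouldReduceLoadWidth. Purely for illustration (this is not the in-tree
AMDGPU implementation, and MyTargetLowering is a made-up class), an override
that refuses to shrink a load below 32 bits could look like:

  // Hypothetical override, for illustration only.  Refusing sub-dword
  // result types keeps the combine above from replacing a 32-bit load
  // with a narrowed extending load of a single element.
  bool MyTargetLowering::shouldReduceLoadWidth(SDNode *Load,
                                               ISD::LoadExtType ExtTy,
                                               EVT NewVT) const {
    if (NewVT.getSizeInBits() < 32)
      return false;
    return TargetLowering::shouldReduceLoadWidth(Load, ExtTy, NewVT);
  }

The new tests below check for exactly this outcome: the <2 x i16> source stays
a full dword load (s_load_dword / buffer_load_dword) followed by a 16-bit
shift, rather than being narrowed to a sub-dword extending load.
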
Modified: llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll?rev=292842&r1=292841&r2=292842&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll Mon Jan 23 16:48:53 2017
@@ -15,6 +15,34 @@ define void @extract_vector_elt_v2i16(i1
   ret void
 }
 
+; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_sgpr:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_lshr_b32 s{{[0-9]+}}, [[IDX]], 16
+; GCN: v_mov_b32_e32 [[VVEC:v[0-9]+]], [[VEC]]
+define void @extract_vector_elt_v2i16_dynamic_sgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 %idx) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %elt = extractelement <2 x i16> %vec, i32 %idx
+  store i16 %elt, i16 addrspace(1)* %out, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_vgpr:
+; GCN: {{buffer|flat}}_load_dword [[IDX:v[0-9]+]]
+; GCN: buffer_load_dword [[VEC:v[0-9]+]]
+; GCN: v_lshrrev_b32_e32 [[ELT:v[0-9]+]], 16, [[VEC]]
+define void @extract_vector_elt_v2i16_dynamic_vgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vec.ptr, i32 addrspace(1)* %idx.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds i16, i16 addrspace(1)* %out, i64 %tid.ext
+  %idx = load volatile i32, i32 addrspace(1)* %gep
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vec.ptr
+  %elt = extractelement <2 x i16> %vec, i32 %idx
+  store i16 %elt, i16 addrspace(1)* %out.gep, align 2
+  ret void
+}
+
 ; GCN-LABEL: {{^}}extract_vector_elt_v3i16:
 ; GCN: buffer_load_ushort
 ; GCN: buffer_store_short
@@ -80,4 +108,7 @@ define void @dynamic_extract_vector_elt_
   ret void
 }
 
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
 attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }