[llvm] r325643 - Revert "[AMDGPU] Increased vector length for global/constant loads."
Konstantin Zhuravlyov via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 20 15:30:21 PST 2018
Author: kzhuravl
Date: Tue Feb 20 15:30:21 2018
New Revision: 325643
URL: http://llvm.org/viewvc/llvm-project?rev=325643&view=rev
Log:
Revert "[AMDGPU] Increased vector length for global/constant loads."
https://reviews.llvm.org/rL325518
It breaks the following OpenCL conformance tests:
- Basic - parameter_types
- Basic - vload_private
Removed:
llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll
Modified:
llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll
llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp?rev=325643&r1=325642&r2=325643&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp Tue Feb 20 15:30:21 2018
@@ -233,38 +233,12 @@ unsigned AMDGPUTTIImpl::getMinVectorRegi
return 32;
}
-unsigned AMDGPUTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
- unsigned ChainSizeInBytes,
- VectorType *VecTy) const {
- unsigned VecRegBitWidth = VF * LoadSize;
- if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
- // TODO: Support element-size less than 32bit?
- return 128 / LoadSize;
-
- return VF;
-}
-
-unsigned AMDGPUTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
- unsigned ChainSizeInBytes,
- VectorType *VecTy) const {
- unsigned VecRegBitWidth = VF * StoreSize;
- if (VecRegBitWidth > 128)
- return 128 / StoreSize;
-
- return VF;
-}
-
unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
AMDGPUAS AS = ST->getAMDGPUAS();
if (AddrSpace == AS.GLOBAL_ADDRESS ||
AddrSpace == AS.CONSTANT_ADDRESS ||
- AddrSpace == AS.CONSTANT_ADDRESS_32BIT) {
- if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
- return 128;
- return 512;
- }
-
- if (AddrSpace == AS.FLAT_ADDRESS)
+ AddrSpace == AS.CONSTANT_ADDRESS_32BIT ||
+ AddrSpace == AS.FLAT_ADDRESS)
return 128;
if (AddrSpace == AS.LOCAL_ADDRESS ||
AddrSpace == AS.REGION_ADDRESS)
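For context on the functional change: the LoadStoreVectorizer caps each load/store chain at the width this hook reports, so the revert limits global/constant chains to 128 bits again. A minimal standalone sketch of that relationship (the helper below is illustrative and not part of LLVM; only getLoadStoreVecRegBitWidth itself is real TTI API):

    #include "llvm/Analysis/TargetTransformInfo.h"

    // Illustrative helper: how many elements of a given size fit under the
    // vector register width the TTI hook reports for an address space.
    unsigned maxChainElements(const llvm::TargetTransformInfo &TTI,
                              unsigned AddrSpace, unsigned EltSizeInBits) {
      // Post-revert: 128 / 32 = 4 floats per chain (s_load_dwordx4);
      // the reverted 512-bit width allowed 16 (s_load_dwordx16).
      return TTI.getLoadStoreVecRegBitWidth(AddrSpace) / EltSizeInBits;
    }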
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h?rev=325643&r1=325642&r2=325643&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h Tue Feb 20 15:30:21 2018
@@ -118,12 +118,6 @@ public:
unsigned getNumberOfRegisters(bool Vector) const;
unsigned getRegisterBitWidth(bool Vector) const;
unsigned getMinVectorRegisterBitWidth() const;
- unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
- unsigned ChainSizeInBytes,
- VectorType *VecTy) const;
- unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
- unsigned ChainSizeInBytes,
- VectorType *VecTy) const;
unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
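The deleted declarations are the hooks the vectorizer consults to re-split an over-wide chain. A simplified standalone version of the deleted load-side policy (a sketch under the same 128-bit limit as the reverted code, not the verbatim source):

    // Sketch of the removed getLoadVectorFactor rule: chains wider than
    // 128 bits were only kept when elements are at least 32 bits wide.
    unsigned loadVectorFactor(unsigned VF, unsigned LoadSizeInBits,
                              unsigned ScalarSizeInBits) {
      unsigned VecRegBitWidth = VF * LoadSizeInBits;
      if (VecRegBitWidth > 128 && ScalarSizeInBits < 32)
        return 128 / LoadSizeInBits; // fall back to a 128-bit chain
      return VF;
    }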
Removed: llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll?rev=325642&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll (removed)
@@ -1,37 +0,0 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-
-; Tests whether a load chain of 8 constants gets vectorized into a wider load.
-; FUNC-LABEL: {{^}}constant_load_v8f32:
-; GCN: s_load_dwordx8
-; EG: VTX_READ_128
-; EG: VTX_READ_128
-define amdgpu_kernel void @constant_load_v8f32(float addrspace(4)* noalias nocapture readonly %weights, float addrspace(1)* noalias nocapture %out_ptr) {
-entry:
- %out_ptr.promoted = load float, float addrspace(1)* %out_ptr, align 4
- %tmp = load float, float addrspace(4)* %weights, align 4
- %add = fadd float %tmp, %out_ptr.promoted
- %arrayidx.1 = getelementptr inbounds float, float addrspace(4)* %weights, i64 1
- %tmp1 = load float, float addrspace(4)* %arrayidx.1, align 4
- %add.1 = fadd float %tmp1, %add
- %arrayidx.2 = getelementptr inbounds float, float addrspace(4)* %weights, i64 2
- %tmp2 = load float, float addrspace(4)* %arrayidx.2, align 4
- %add.2 = fadd float %tmp2, %add.1
- %arrayidx.3 = getelementptr inbounds float, float addrspace(4)* %weights, i64 3
- %tmp3 = load float, float addrspace(4)* %arrayidx.3, align 4
- %add.3 = fadd float %tmp3, %add.2
- %arrayidx.4 = getelementptr inbounds float, float addrspace(4)* %weights, i64 4
- %tmp4 = load float, float addrspace(4)* %arrayidx.4, align 4
- %add.4 = fadd float %tmp4, %add.3
- %arrayidx.5 = getelementptr inbounds float, float addrspace(4)* %weights, i64 5
- %tmp5 = load float, float addrspace(4)* %arrayidx.5, align 4
- %add.5 = fadd float %tmp5, %add.4
- %arrayidx.6 = getelementptr inbounds float, float addrspace(4)* %weights, i64 6
- %tmp6 = load float, float addrspace(4)* %arrayidx.6, align 4
- %add.6 = fadd float %tmp6, %add.5
- %arrayidx.7 = getelementptr inbounds float, float addrspace(4)* %weights, i64 7
- %tmp7 = load float, float addrspace(4)* %arrayidx.7, align 4
- %add.7 = fadd float %tmp7, %add.6
- store float %add.7, float addrspace(1)* %out_ptr, align 4
- ret void
-}
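The test above is deleted rather than updated because, with the 128-bit limit restored, its eight 32-bit loads (256 bits total) can only be vectorized in 128-bit pieces; the s_load_dwordx8 check would no longer match (presumably two s_load_dwordx4 loads are emitted instead).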
Modified: llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll?rev=325643&r1=325642&r2=325643&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll Tue Feb 20 15:30:21 2018
@@ -13,36 +13,3 @@ define amdgpu_kernel void @constant_load
}
attributes #0 = { nounwind }
-
-; Tests whether a load-chain of 8 constants of 64bit each gets vectorized into a wider load.
-; FUNC-LABEL: {{^}}constant_load_2v4f64:
-; GCN: s_load_dwordx16
-define amdgpu_kernel void @constant_load_2v4f64(double addrspace(4)* noalias nocapture readonly %weights, double addrspace(1)* noalias nocapture %out_ptr) {
-entry:
- %out_ptr.promoted = load double, double addrspace(1)* %out_ptr, align 4
- %tmp = load double, double addrspace(4)* %weights, align 4
- %add = fadd double %tmp, %out_ptr.promoted
- %arrayidx.1 = getelementptr inbounds double, double addrspace(4)* %weights, i64 1
- %tmp1 = load double, double addrspace(4)* %arrayidx.1, align 4
- %add.1 = fadd double %tmp1, %add
- %arrayidx.2 = getelementptr inbounds double, double addrspace(4)* %weights, i64 2
- %tmp2 = load double, double addrspace(4)* %arrayidx.2, align 4
- %add.2 = fadd double %tmp2, %add.1
- %arrayidx.3 = getelementptr inbounds double, double addrspace(4)* %weights, i64 3
- %tmp3 = load double, double addrspace(4)* %arrayidx.3, align 4
- %add.3 = fadd double %tmp3, %add.2
- %arrayidx.4 = getelementptr inbounds double, double addrspace(4)* %weights, i64 4
- %tmp4 = load double, double addrspace(4)* %arrayidx.4, align 4
- %add.4 = fadd double %tmp4, %add.3
- %arrayidx.5 = getelementptr inbounds double, double addrspace(4)* %weights, i64 5
- %tmp5 = load double, double addrspace(4)* %arrayidx.5, align 4
- %add.5 = fadd double %tmp5, %add.4
- %arrayidx.6 = getelementptr inbounds double, double addrspace(4)* %weights, i64 6
- %tmp6 = load double, double addrspace(4)* %arrayidx.6, align 4
- %add.6 = fadd double %tmp6, %add.5
- %arrayidx.7 = getelementptr inbounds double, double addrspace(4)* %weights, i64 7
- %tmp7 = load double, double addrspace(4)* %arrayidx.7, align 4
- %add.7 = fadd double %tmp7, %add.6
- store double %add.7, double addrspace(1)* %out_ptr, align 4
- ret void
-}
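Likewise here: eight 64-bit loads add up to 512 bits, the s_load_dwordx16 the check expected under the reverted 512-bit width, but the restored 128-bit cap admits only 128 / 64 = 2 doubles per vectorized load, so the function and its check are dropped.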
Modified: llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll?rev=325643&r1=325642&r2=325643&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll Tue Feb 20 15:30:21 2018
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -amdgpu-load-store-vectorizer=0 | FileCheck --check-prefix=GCN %s
+; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global | FileCheck --check-prefix=GCN %s
; Check that the waitcnt insertion algorithm correctly propagates wait counts
; from before a loop to the loop header.
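The -amdgpu-load-store-vectorizer=0 flag was presumably added by the original patch to keep these waitcnt checks independent of the wider vectorization; with the 128-bit width restored, the RUN line returns to the default pipeline.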