[llvm] r326907 - Revert "[AMDGPU] Widened vector length for global/constant address space."

Tom Stellard via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 7 09:57:15 PST 2018


On 03/07/2018 08:55 AM, Farhana Aleen via llvm-commits wrote:
> Author: faaleen
> Date: Wed Mar  7 08:55:27 2018
> New Revision: 326907
> 
> URL: http://llvm.org/viewvc/llvm-project?rev=326907&view=rev
> Log:
> Revert "[AMDGPU] Widened vector length for global/constant address space."
> 
> This reverts commit ce988cc100dc65e7c6c727aff31ceb99231cab03.
> 

Why was this commit reverted?  Also, it is preferred to use the git-svnrevert
script in utils/git-svn to do reverts.
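If memory serves it is run from a git-svn checkout and takes the revision
being reverted, along the lines of:

  $ PATH=utils/git-svn:$PATH git svnrevert rNNNNNN

(rNNNNNN being the original commit's revision; I'm quoting the interface
from memory, so check the script itself). It produces a revert commit whose
message references the SVN revision, which makes the log easier to follow.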

-Tom

> Removed:
>     llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll
> Modified:
>     llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
>     llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
>     llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
>     llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
>     llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll
>     llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll
> 
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp?rev=326907&r1=326906&r2=326907&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp Wed Mar  7 08:55:27 2018
> @@ -234,38 +234,12 @@ unsigned AMDGPUTTIImpl::getMinVectorRegi
>    return 32;
>  }
>  
> -unsigned AMDGPUTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
> -                                            unsigned ChainSizeInBytes,
> -                                            VectorType *VecTy) const {
> -  unsigned VecRegBitWidth = VF * LoadSize;
> -  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
> -    // TODO: Support element-size less than 32bit?
> -    return 128 / LoadSize;
> -
> -  return VF;
> -}
> -
> -unsigned AMDGPUTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
> -                                             unsigned ChainSizeInBytes,
> -                                             VectorType *VecTy) const {
> -  unsigned VecRegBitWidth = VF * StoreSize;
> -  if (VecRegBitWidth > 128)
> -    return 128 / StoreSize;
> -
> -  return VF;
> -}
> -
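
(For anyone reading along: these two removed hooks let the target clamp the
vectorizer's chosen vector factor. The load hook caps chains of sub-dword
elements at one 128-bit register: with 16-bit elements and VF = 32,
VecRegBitWidth = 32 * 16 = 512 > 128, so it returns 128 / 16 = 8 lanes.
The store hook applies the 128-bit cap unconditionally.)
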
>  unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
>    AMDGPUAS AS = ST->getAMDGPUAS();
>    if (AddrSpace == AS.GLOBAL_ADDRESS ||
>        AddrSpace == AS.CONSTANT_ADDRESS ||
> -      AddrSpace == AS.CONSTANT_ADDRESS_32BIT) {
> -    if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
> -      return 128;
> -    return 512;
> -  }
> -
> -  if (AddrSpace == AS.FLAT_ADDRESS)
> +      AddrSpace == AS.CONSTANT_ADDRESS_32BIT ||
> +      AddrSpace == AS.FLAT_ADDRESS)
>      return 128;
>    if (AddrSpace == AS.LOCAL_ADDRESS ||
>        AddrSpace == AS.REGION_ADDRESS)
> @@ -276,8 +250,8 @@ unsigned AMDGPUTTIImpl::getLoadStoreVecR
>    if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS &&
>        (AddrSpace == AS.PARAM_D_ADDRESS ||
>        AddrSpace == AS.PARAM_I_ADDRESS ||
> -       (AddrSpace >= AS.CONSTANT_BUFFER_0 &&
> -        AddrSpace <= AS.CONSTANT_BUFFER_15)))
> +      (AddrSpace >= AS.CONSTANT_BUFFER_0 &&
> +      AddrSpace <= AS.CONSTANT_BUFFER_15)))
>      return 128;
>    llvm_unreachable("unhandled address space");
>  }
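
(Net effect of this hunk: global, constant and constant-32bit go back to
reporting a 128-bit vector register width, the same as flat, instead of
512 bits on GCN and 128 on Northern Islands and earlier. So the load/store
vectorizer will again form at most dwordx4-sized chains for those address
spaces, which is what the removed dwordx8/dwordx16 tests below were
checking for.)
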
> 
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h?rev=326907&r1=326906&r2=326907&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h Wed Mar  7 08:55:27 2018
> @@ -118,12 +118,6 @@ public:
>    unsigned getNumberOfRegisters(bool Vector) const;
>    unsigned getRegisterBitWidth(bool Vector) const;
>    unsigned getMinVectorRegisterBitWidth() const;
> -  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
> -                               unsigned ChainSizeInBytes,
> -                               VectorType *VecTy) const;
> -  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
> -                                unsigned ChainSizeInBytes,
> -                                VectorType *VecTy) const;
>    unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
>  
>    bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
> 
> Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=326907&r1=326906&r2=326907&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Wed Mar  7 08:55:27 2018
> @@ -3454,10 +3454,6 @@ bool SITargetLowering::isFMAFasterThanFM
>    return false;
>  }
>  
> -static bool isDwordAligned(unsigned Alignment) {
> -  return Alignment % 4 == 0;
> -}
> -
>  //===----------------------------------------------------------------------===//
>  // Custom DAG Lowering Operations
>  //===----------------------------------------------------------------------===//
> @@ -5357,10 +5353,9 @@ SDValue SITargetLowering::LowerLOAD(SDVa
>    assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
>           "Custom lowering for non-i32 vectors hasn't been implemented.");
>  
> -  unsigned Alignment = Load->getAlignment();
>    unsigned AS = Load->getAddressSpace();
>    if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
> -                          AS, Alignment)) {
> +                          AS, Load->getAlignment())) {
>      SDValue Ops[2];
>      std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
>      return DAG.getMergeValues(Ops, DL);
> @@ -5388,8 +5383,7 @@ SDValue SITargetLowering::LowerLOAD(SDVa
>        AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
>        AS == AMDGPUASI.GLOBAL_ADDRESS) {
>      if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
> -        !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
> -        isDwordAligned(Alignment))
> +        !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load))
>        return SDValue();
>      // Non-uniform loads will be selected to MUBUF instructions, so they
>      // have the same legalization requirements as global and private
> 
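
(This drops the isDwordAligned() guard that the widening patch had added to
the uniform-load path in LowerLOAD; returning SDValue() here keeps the
default lowering, which selects scalar s_load instructions for uniform,
non-volatile, non-clobbered global/constant loads. The now-unused static
helper above is deleted with it.)
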
> Modified: llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp?rev=326907&r1=326906&r2=326907&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp (original)
> +++ llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp Wed Mar  7 08:55:27 2018
> @@ -666,12 +666,8 @@ Vectorizer::collectInstructions(BasicBlo
>        unsigned AS = Ptr->getType()->getPointerAddressSpace();
>        unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
>  
> -      unsigned VF = VecRegSize / TySize;
> -      VectorType *VecTy = dyn_cast<VectorType>(Ty);
> -
>        // No point in looking at these if they're too big to vectorize.
> -      if (TySize > VecRegSize / 2 ||
> -          (VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
> +      if (TySize > VecRegSize / 2)
>          continue;
>  
>        // Make sure all the users of a vector are constant-index extracts.
> @@ -713,12 +709,8 @@ Vectorizer::collectInstructions(BasicBlo
>        unsigned AS = Ptr->getType()->getPointerAddressSpace();
>        unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
>  
> -      unsigned VF = VecRegSize / TySize;
> -      VectorType *VecTy = dyn_cast<VectorType>(Ty);
> -
>        // No point in looking at these if they're too big to vectorize.
> -      if (TySize > VecRegSize / 2 ||
> -          (VecTy && TTI.getStoreVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
> +      if (TySize > VecRegSize / 2)
>          continue;
>  
>        if (isa<VectorType>(Ty) && !llvm::all_of(SI->users(), [](const User *U) {
> 
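
(With the factor hooks gone, the only size gate left in collectInstructions
is TySize > VecRegSize / 2. For a 128-bit register width that means a
96-bit <3 x i32> access is skipped, since 96 > 64, while accesses of up to
64 bits remain candidates for chaining.)
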
> Removed: llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll?rev=326906&view=auto
> ==============================================================================
> --- llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll (original)
> +++ llvm/trunk/test/CodeGen/AMDGPU/load-constant-f32.ll (removed)
> @@ -1,37 +0,0 @@
> -; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
> -; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
> -
> -; Tests whether a load chain of 8 constants gets vectorized into a wider load.
> -; FUNC-LABEL: {{^}}constant_load_v8f32:
> -; GCN: s_load_dwordx8
> -; EG: VTX_READ_128
> -; EG: VTX_READ_128
> -define amdgpu_kernel void @constant_load_v8f32(float addrspace(4)* noalias nocapture readonly %weights, float addrspace(1)* noalias nocapture %out_ptr) {
> -entry:
> -  %out_ptr.promoted = load float, float addrspace(1)* %out_ptr, align 4
> -  %tmp = load float, float addrspace(4)* %weights, align 4
> -  %add = fadd float %tmp, %out_ptr.promoted
> -  %arrayidx.1 = getelementptr inbounds float, float addrspace(4)* %weights, i64 1
> -  %tmp1 = load float, float addrspace(4)* %arrayidx.1, align 4
> -  %add.1 = fadd float %tmp1, %add
> -  %arrayidx.2 = getelementptr inbounds float, float addrspace(4)* %weights, i64 2
> -  %tmp2 = load float, float addrspace(4)* %arrayidx.2, align 4
> -  %add.2 = fadd float %tmp2, %add.1
> -  %arrayidx.3 = getelementptr inbounds float, float addrspace(4)* %weights, i64 3
> -  %tmp3 = load float, float addrspace(4)* %arrayidx.3, align 4
> -  %add.3 = fadd float %tmp3, %add.2
> -  %arrayidx.4 = getelementptr inbounds float, float addrspace(4)* %weights, i64 4
> -  %tmp4 = load float, float addrspace(4)* %arrayidx.4, align 4
> -  %add.4 = fadd float %tmp4, %add.3
> -  %arrayidx.5 = getelementptr inbounds float, float addrspace(4)* %weights, i64 5
> -  %tmp5 = load float, float addrspace(4)* %arrayidx.5, align 4
> -  %add.5 = fadd float %tmp5, %add.4
> -  %arrayidx.6 = getelementptr inbounds float, float addrspace(4)* %weights, i64 6
> -  %tmp6 = load float, float addrspace(4)* %arrayidx.6, align 4
> -  %add.6 = fadd float %tmp6, %add.5
> -  %arrayidx.7 = getelementptr inbounds float, float addrspace(4)* %weights, i64 7
> -  %tmp7 = load float, float addrspace(4)* %arrayidx.7, align 4
> -  %add.7 = fadd float %tmp7, %add.6
> -  store float %add.7, float addrspace(1)* %out_ptr, align 4
> -  ret void
> -}
> \ No newline at end of file
> 
> Modified: llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll?rev=326907&r1=326906&r2=326907&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll (original)
> +++ llvm/trunk/test/CodeGen/AMDGPU/load-constant-f64.ll Wed Mar  7 08:55:27 2018
> @@ -13,36 +13,3 @@ define amdgpu_kernel void @constant_load
>  }
>  
>  attributes #0 = { nounwind }
> -
> -; Tests whether a load-chain of 8 constants of 64bit each gets vectorized into a wider load.
> -; FUNC-LABEL: {{^}}constant_load_2v4f64:
> -; GCN: s_load_dwordx16
> -define amdgpu_kernel void @constant_load_2v4f64(double addrspace(4)* noalias nocapture readonly %weights, double addrspace(1)* noalias nocapture %out_ptr) {
> -entry:
> -  %out_ptr.promoted = load double, double addrspace(1)* %out_ptr, align 4
> -  %tmp = load double, double addrspace(4)* %weights, align 4
> -  %add = fadd double %tmp, %out_ptr.promoted
> -  %arrayidx.1 = getelementptr inbounds double, double addrspace(4)* %weights, i64 1
> -  %tmp1 = load double, double addrspace(4)* %arrayidx.1, align 4
> -  %add.1 = fadd double %tmp1, %add
> -  %arrayidx.2 = getelementptr inbounds double, double addrspace(4)* %weights, i64 2
> -  %tmp2 = load double, double addrspace(4)* %arrayidx.2, align 4
> -  %add.2 = fadd double %tmp2, %add.1
> -  %arrayidx.3 = getelementptr inbounds double, double addrspace(4)* %weights, i64 3
> -  %tmp3 = load double, double addrspace(4)* %arrayidx.3, align 4
> -  %add.3 = fadd double %tmp3, %add.2
> -  %arrayidx.4 = getelementptr inbounds double, double addrspace(4)* %weights, i64 4
> -  %tmp4 = load double, double addrspace(4)* %arrayidx.4, align 4
> -  %add.4 = fadd double %tmp4, %add.3
> -  %arrayidx.5 = getelementptr inbounds double, double addrspace(4)* %weights, i64 5
> -  %tmp5 = load double, double addrspace(4)* %arrayidx.5, align 4
> -  %add.5 = fadd double %tmp5, %add.4
> -  %arrayidx.6 = getelementptr inbounds double, double addrspace(4)* %weights, i64 6
> -  %tmp6 = load double, double addrspace(4)* %arrayidx.6, align 4
> -  %add.6 = fadd double %tmp6, %add.5
> -  %arrayidx.7 = getelementptr inbounds double, double addrspace(4)* %weights, i64 7
> -  %tmp7 = load double, double addrspace(4)* %arrayidx.7, align 4
> -  %add.7 = fadd double %tmp7, %add.6
> -  store double %add.7, double addrspace(1)* %out_ptr, align 4
> -  ret void
> -}
> 
> Modified: llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll?rev=326907&r1=326906&r2=326907&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll (original)
> +++ llvm/trunk/test/CodeGen/AMDGPU/waitcnt-looptest.ll Wed Mar  7 08:55:27 2018
> @@ -1,4 +1,4 @@
> -; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -amdgpu-load-store-vectorizer=0 | FileCheck --check-prefix=GCN %s
> +; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global | FileCheck --check-prefix=GCN %s
>  
>  ; Check that the waitcnt insertion algorithm correctly propagates wait counts
>  ; from before a loop to the loop header.
> 
> 