[llvm] r238108 - Add target hook to allow merging stores of nonzero constants

Tom Stellard tom at stellard.net
Mon Jun 8 13:11:26 PDT 2015


Hi Matt,

This seems to have broken the attached test case.  It looks like
the stores are merged, but the values are replaced with zero vectors.

-Tom

On Sun, May 24, 2015 at 12:51:28AM -0000, Matt Arsenault wrote:
> Author: arsenm
> Date: Sat May 23 19:51:27 2015
> New Revision: 238108
> 
> URL: http://llvm.org/viewvc/llvm-project?rev=238108&view=rev
> Log:
> Add target hook to allow merging stores of nonzero constants
> 
> On GPU targets, materializing constants is cheap and stores are
> expensive, so only doing this for zero vectors was silly.
> 
> Most of the new testcases aren't optimally merged, and are for
> later improvements.
> 
> Added:
>     llvm/trunk/test/CodeGen/R600/merge-stores.ll
> Modified:
>     llvm/trunk/include/llvm/Target/TargetLowering.h
>     llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
>     llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp
>     llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h
>     llvm/trunk/test/CodeGen/R600/store.ll
> 
> Modified: llvm/trunk/include/llvm/Target/TargetLowering.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetLowering.h?rev=238108&r1=238107&r2=238108&view=diff
> ==============================================================================
> --- llvm/trunk/include/llvm/Target/TargetLowering.h (original)
> +++ llvm/trunk/include/llvm/Target/TargetLowering.h Sat May 23 19:51:27 2015
> @@ -267,6 +267,15 @@ public:
>      return true;
>    }
>  
> +  /// Return true if it is expected to be cheaper to do a store of a non-zero
> +  /// vector constant with the given size and type for the address space than to
> +  /// store the individual scalar element constants.
> +  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
> +                                            unsigned NumElem,
> +                                            unsigned AddrSpace) const {
> +    return false;
> +  }
> +
>    /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
>    virtual bool isCheapToSpeculateCttz() const {
>      return false;
> 
> Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=238108&r1=238107&r2=238108&view=diff
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
> +++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Sat May 23 19:51:27 2015
> @@ -10892,10 +10892,17 @@ bool DAGCombiner::MergeConsecutiveStores
>        }
>      }
>  
> -    // We only use vectors if the constant is known to be zero and the
> -    // function is not marked with the noimplicitfloat attribute.
> -    if (NonZero || NoVectors)
> +
> +    // We only use vectors if the constant is known to be zero or the target
> +    // allows it and the function is not marked with the noimplicitfloat
> +    // attribute.
> +    if (NoVectors) {
> +      LastLegalVectorType = 0;
> +    } else if (NonZero && !TLI.storeOfVectorConstantIsCheap(MemVT,
> +                                                            LastLegalVectorType,
> +                                                            FirstStoreAS)) {
>        LastLegalVectorType = 0;
> +    }
>  
>      // Check if we found a legal integer type to store.
>      if (LastLegalType == 0 && LastLegalVectorType == 0)
> 
> Modified: llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp?rev=238108&r1=238107&r2=238108&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp Sat May 23 19:51:27 2015
> @@ -509,6 +509,12 @@ bool AMDGPUTargetLowering::isFNegFree(EV
>    return VT == MVT::f32 || VT == MVT::f64;
>  }
>  
> +bool AMDGPUTargetLowering:: storeOfVectorConstantIsCheap(EVT MemVT,
> +                                                         unsigned NumElem,
> +                                                         unsigned AS) const {
> +  return true;
> +}
> +
>  bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
>    // Truncate is just accessing a subregister.
>    return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
> 
> Modified: llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h?rev=238108&r1=238107&r2=238108&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h (original)
> +++ llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h Sat May 23 19:51:27 2015
> @@ -133,6 +133,10 @@ public:
>                               EVT ExtVT) const override;
>  
>    bool isLoadBitCastBeneficial(EVT, EVT) const override;
> +
> +  bool storeOfVectorConstantIsCheap(EVT MemVT,
> +                                    unsigned NumElem,
> +                                    unsigned AS) const override;
>    bool isCheapToSpeculateCttz() const override;
>    bool isCheapToSpeculateCtlz() const override;
>  
> 
> Added: llvm/trunk/test/CodeGen/R600/merge-stores.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/merge-stores.ll?rev=238108&view=auto
> ==============================================================================
> --- llvm/trunk/test/CodeGen/R600/merge-stores.ll (added)
> +++ llvm/trunk/test/CodeGen/R600/merge-stores.ll Sat May 23 19:51:27 2015
> @@ -0,0 +1,536 @@
> +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
> +; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
> +
> +; Run with devices with different unaligned load restrictions.
> +
> +; TODO: Vector element tests
> +; TODO: Non-zero base offset for load and store combinations
> +; TODO: Same base addrspacecasted
> +
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_constants_i8:
> +; GCN: buffer_store_byte
> +; GCN: buffer_store_byte
> +; GCN: s_endpgm
> +define void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
> +
> +  store i8 123, i8 addrspace(1)* %out.gep.1
> +  store i8 456, i8 addrspace(1)* %out, align 2
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_constants_i8_natural_align:
> +; GCN: buffer_store_byte
> +; GCN: buffer_store_byte
> +; GCN: s_endpgm
> +define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
> +
> +  store i8 123, i8 addrspace(1)* %out.gep.1
> +  store i8 456, i8 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_constants_i16:
> +; GCN: buffer_store_dword v
> +define void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
> +
> +  store i16 123, i16 addrspace(1)* %out.gep.1
> +  store i16 456, i16 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_constants_0_i16:
> +; GCN: buffer_store_dword v
> +define void @merge_global_store_2_constants_0_i16(i16 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
> +
> +  store i16 0, i16 addrspace(1)* %out.gep.1
> +  store i16 0, i16 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_constants_i16_natural_align:
> +; GCN: buffer_store_short
> +; GCN: buffer_store_short
> +; GCN: s_endpgm
> +define void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
> +
> +  store i16 123, i16 addrspace(1)* %out.gep.1
> +  store i16 456, i16 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_constants_i32:
> +; SI-DAG: s_movk_i32 [[SLO:s[0-9]+]], 0x1c8
> +; SI-DAG: s_movk_i32 [[SHI:s[0-9]+]], 0x7b
> +; SI-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], [[SLO]]
> +; SI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[SHI]]
> +; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
> +define void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +
> +  store i32 123, i32 addrspace(1)* %out.gep.1
> +  store i32 456, i32 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_constants_i32_f32:
> +; GCN: buffer_store_dwordx2
> +define void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %out.gep.1.bc = bitcast i32 addrspace(1)* %out.gep.1 to float addrspace(1)*
> +  store float 1.0, float addrspace(1)* %out.gep.1.bc
> +  store i32 456, i32 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_constants_f32_i32:
> +; GCN: buffer_store_dwordx2
> +define void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
> +  %out.gep.1.bc = bitcast float addrspace(1)* %out.gep.1 to i32 addrspace(1)*
> +  store i32 123, i32 addrspace(1)* %out.gep.1.bc
> +  store float 4.0, float addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_constants_i32:
> +; GCN: buffer_store_dwordx4
> +define void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
> +  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
> +
> +  store i32 123, i32 addrspace(1)* %out.gep.1
> +  store i32 456, i32 addrspace(1)* %out.gep.2
> +  store i32 333, i32 addrspace(1)* %out.gep.3
> +  store i32 1234, i32 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_constants_f32_order:
> +; XGCN: buffer_store_dwordx4
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dwordx2 v
> +define void @merge_global_store_4_constants_f32_order(float addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
> +  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
> +
> +  store float 8.0, float addrspace(1)* %out
> +  store float 1.0, float addrspace(1)* %out.gep.1
> +  store float 2.0, float addrspace(1)* %out.gep.2
> +  store float 4.0, float addrspace(1)* %out.gep.3
> +  ret void
> +}
> +
> +; First store is out of order. Because of order of combines, the
> +; consecutive store fails because only some of the stores have been
> +; replaced with integer constant stores, and then won't merge because
> +; the types are different.
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_constants_f32:
> +; XGCN: buffer_store_dwordx4
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +define void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
> +  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
> +
> +  store float 1.0, float addrspace(1)* %out.gep.1
> +  store float 2.0, float addrspace(1)* %out.gep.2
> +  store float 4.0, float addrspace(1)* %out.gep.3
> +  store float 8.0, float addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_3_constants_i32:
> +; SI-DAG: buffer_store_dwordx2
> +; SI-DAG: buffer_store_dword
> +; SI-NOT: buffer_store_dword
> +; GCN: s_endpgm
> +define void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
> +
> +  store i32 123, i32 addrspace(1)* %out.gep.1
> +  store i32 456, i32 addrspace(1)* %out.gep.2
> +  store i32 1234, i32 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_constants_i64:
> +; XGCN: buffer_store_dwordx4
> +; GCN: buffer_store_dwordx2
> +; GCN: buffer_store_dwordx2
> +define void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
> +
> +  store i64 123, i64 addrspace(1)* %out.gep.1
> +  store i64 456, i64 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_constants_i64:
> +; XGCN: buffer_store_dwordx4
> +; XGCN: buffer_store_dwordx4
> +
> +; GCN: buffer_store_dwordx2
> +; GCN: buffer_store_dwordx2
> +; GCN: buffer_store_dwordx2
> +; GCN: buffer_store_dwordx2
> +define void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
> +  %out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
> +  %out.gep.2 = getelementptr i64, i64 addrspace(1)* %out, i64 2
> +  %out.gep.3 = getelementptr i64, i64 addrspace(1)* %out, i64 3
> +
> +  store i64 123, i64 addrspace(1)* %out.gep.1
> +  store i64 456, i64 addrspace(1)* %out.gep.2
> +  store i64 333, i64 addrspace(1)* %out.gep.3
> +  store i64 1234, i64 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_adjacent_loads_i32:
> +; GCN: buffer_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]]
> +; GCN: buffer_store_dwordx2 [[LOAD]]
> +define void @merge_global_store_2_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
> +
> +  %lo = load i32, i32 addrspace(1)* %in
> +  %hi = load i32, i32 addrspace(1)* %in.gep.1
> +
> +  store i32 %lo, i32 addrspace(1)* %out
> +  store i32 %hi, i32 addrspace(1)* %out.gep.1
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_adjacent_loads_i32_nonzero_base:
> +; GCN: buffer_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
> +; GCN: buffer_store_dwordx2 [[LOAD]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
> +define void @merge_global_store_2_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
> +  %in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 2
> +  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 3
> +
> +  %out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i32 2
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 3
> +  %lo = load i32, i32 addrspace(1)* %in.gep.0
> +  %hi = load i32, i32 addrspace(1)* %in.gep.1
> +
> +  store i32 %lo, i32 addrspace(1)* %out.gep.0
> +  store i32 %hi, i32 addrspace(1)* %out.gep.1
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_2_adjacent_loads_shuffle_i32:
> +; GCN: buffer_load_dword v
> +; GCN: buffer_load_dword v
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +define void @merge_global_store_2_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
> +
> +  %lo = load i32, i32 addrspace(1)* %in
> +  %hi = load i32, i32 addrspace(1)* %in.gep.1
> +
> +  store i32 %hi, i32 addrspace(1)* %out
> +  store i32 %lo, i32 addrspace(1)* %out.gep.1
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_i32:
> +; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]]
> +; GCN: buffer_store_dwordx4 [[LOAD]]
> +define void @merge_global_store_4_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
> +  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
> +  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
> +  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
> +  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 3
> +
> +  %x = load i32, i32 addrspace(1)* %in
> +  %y = load i32, i32 addrspace(1)* %in.gep.1
> +  %z = load i32, i32 addrspace(1)* %in.gep.2
> +  %w = load i32, i32 addrspace(1)* %in.gep.3
> +
> +  store i32 %x, i32 addrspace(1)* %out
> +  store i32 %y, i32 addrspace(1)* %out.gep.1
> +  store i32 %z, i32 addrspace(1)* %out.gep.2
> +  store i32 %w, i32 addrspace(1)* %out.gep.3
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_3_adjacent_loads_i32:
> +; SI-DAG: buffer_load_dwordx2
> +; SI-DAG: buffer_load_dword v
> +; GCN: s_waitcnt
> +; SI-DAG: buffer_store_dword v
> +; SI-DAG: buffer_store_dwordx2 v
> +; GCN: s_endpgm
> +define void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
> +  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
> +  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
> +
> +  %x = load i32, i32 addrspace(1)* %in
> +  %y = load i32, i32 addrspace(1)* %in.gep.1
> +  %z = load i32, i32 addrspace(1)* %in.gep.2
> +
> +  store i32 %x, i32 addrspace(1)* %out
> +  store i32 %y, i32 addrspace(1)* %out.gep.1
> +  store i32 %z, i32 addrspace(1)* %out.gep.2
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_f32:
> +; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]]
> +; GCN: buffer_store_dwordx4 [[LOAD]]
> +define void @merge_global_store_4_adjacent_loads_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
> +  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
> +  %in.gep.1 = getelementptr float, float addrspace(1)* %in, i32 1
> +  %in.gep.2 = getelementptr float, float addrspace(1)* %in, i32 2
> +  %in.gep.3 = getelementptr float, float addrspace(1)* %in, i32 3
> +
> +  %x = load float, float addrspace(1)* %in
> +  %y = load float, float addrspace(1)* %in.gep.1
> +  %z = load float, float addrspace(1)* %in.gep.2
> +  %w = load float, float addrspace(1)* %in.gep.3
> +
> +  store float %x, float addrspace(1)* %out
> +  store float %y, float addrspace(1)* %out.gep.1
> +  store float %z, float addrspace(1)* %out.gep.2
> +  store float %w, float addrspace(1)* %out.gep.3
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_i32_nonzero_base:
> +; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:44
> +; GCN: buffer_store_dwordx4 [[LOAD]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:28
> +define void @merge_global_store_4_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
> +  %in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 11
> +  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 12
> +  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 13
> +  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 14
> +  %out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i32 7
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 8
> +  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 9
> +  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 10
> +
> +  %x = load i32, i32 addrspace(1)* %in.gep.0
> +  %y = load i32, i32 addrspace(1)* %in.gep.1
> +  %z = load i32, i32 addrspace(1)* %in.gep.2
> +  %w = load i32, i32 addrspace(1)* %in.gep.3
> +
> +  store i32 %x, i32 addrspace(1)* %out.gep.0
> +  store i32 %y, i32 addrspace(1)* %out.gep.1
> +  store i32 %z, i32 addrspace(1)* %out.gep.2
> +  store i32 %w, i32 addrspace(1)* %out.gep.3
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_inverse_i32:
> +; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]]
> +; GCN: s_barrier
> +; GCN: buffer_store_dwordx4 [[LOAD]]
> +define void @merge_global_store_4_adjacent_loads_inverse_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
> +  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
> +  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
> +  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
> +  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 3
> +
> +  %x = load i32, i32 addrspace(1)* %in
> +  %y = load i32, i32 addrspace(1)* %in.gep.1
> +  %z = load i32, i32 addrspace(1)* %in.gep.2
> +  %w = load i32, i32 addrspace(1)* %in.gep.3
> +
> +  ; Make sure the barrier doesn't stop this
> +  tail call void @llvm.AMDGPU.barrier.local() #1
> +
> +  store i32 %w, i32 addrspace(1)* %out.gep.3
> +  store i32 %z, i32 addrspace(1)* %out.gep.2
> +  store i32 %y, i32 addrspace(1)* %out.gep.1
> +  store i32 %x, i32 addrspace(1)* %out
> +
> +  ret void
> +}
> +
> +; TODO: Re-packing of loaded register required. Maybe an IR pass
> +; should catch this?
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_shuffle_i32:
> +; GCN: buffer_load_dword v
> +; GCN: buffer_load_dword v
> +; GCN: buffer_load_dword v
> +; GCN: buffer_load_dword v
> +; GCN: s_barrier
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +define void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
> +  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
> +  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
> +  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
> +  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 3
> +
> +  %x = load i32, i32 addrspace(1)* %in
> +  %y = load i32, i32 addrspace(1)* %in.gep.1
> +  %z = load i32, i32 addrspace(1)* %in.gep.2
> +  %w = load i32, i32 addrspace(1)* %in.gep.3
> +
> +  ; Make sure the barrier doesn't stop this
> +  tail call void @llvm.AMDGPU.barrier.local() #1
> +
> +  store i32 %w, i32 addrspace(1)* %out
> +  store i32 %z, i32 addrspace(1)* %out.gep.1
> +  store i32 %y, i32 addrspace(1)* %out.gep.2
> +  store i32 %x, i32 addrspace(1)* %out.gep.3
> +
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_i8:
> +; GCN: buffer_load_dword [[LOAD:v[0-9]+]]
> +; GCN: buffer_store_dword [[LOAD]]
> +; GCN: s_endpgm
> +define void @merge_global_store_4_adjacent_loads_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
> +  %out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
> +  %out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
> +  %in.gep.1 = getelementptr i8, i8 addrspace(1)* %in, i8 1
> +  %in.gep.2 = getelementptr i8, i8 addrspace(1)* %in, i8 2
> +  %in.gep.3 = getelementptr i8, i8 addrspace(1)* %in, i8 3
> +
> +  %x = load i8, i8 addrspace(1)* %in, align 4
> +  %y = load i8, i8 addrspace(1)* %in.gep.1
> +  %z = load i8, i8 addrspace(1)* %in.gep.2
> +  %w = load i8, i8 addrspace(1)* %in.gep.3
> +
> +  store i8 %x, i8 addrspace(1)* %out, align 4
> +  store i8 %y, i8 addrspace(1)* %out.gep.1
> +  store i8 %z, i8 addrspace(1)* %out.gep.2
> +  store i8 %w, i8 addrspace(1)* %out.gep.3
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_i8_natural_align:
> +; GCN: buffer_load_ubyte
> +; GCN: buffer_load_ubyte
> +; GCN: buffer_load_ubyte
> +; GCN: buffer_load_ubyte
> +; GCN: buffer_store_byte
> +; GCN: buffer_store_byte
> +; GCN: buffer_store_byte
> +; GCN: buffer_store_byte
> +; GCN: s_endpgm
> +define void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
> +  %out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
> +  %out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
> +  %in.gep.1 = getelementptr i8, i8 addrspace(1)* %in, i8 1
> +  %in.gep.2 = getelementptr i8, i8 addrspace(1)* %in, i8 2
> +  %in.gep.3 = getelementptr i8, i8 addrspace(1)* %in, i8 3
> +
> +  %x = load i8, i8 addrspace(1)* %in
> +  %y = load i8, i8 addrspace(1)* %in.gep.1
> +  %z = load i8, i8 addrspace(1)* %in.gep.2
> +  %w = load i8, i8 addrspace(1)* %in.gep.3
> +
> +  store i8 %x, i8 addrspace(1)* %out
> +  store i8 %y, i8 addrspace(1)* %out.gep.1
> +  store i8 %z, i8 addrspace(1)* %out.gep.2
> +  store i8 %w, i8 addrspace(1)* %out.gep.3
> +  ret void
> +}
> +
> +; This works once AA is enabled on the subtarget
> +; GCN-LABEL: {{^}}merge_global_store_4_vector_elts_loads_v4i32:
> +; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]]
> +; XGCN: buffer_store_dwordx4 [[LOAD]]
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +; GCN: buffer_store_dword v
> +define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
> +  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
> +  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
> +  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %in
> +
> +  %x = extractelement <4 x i32> %vec, i32 0
> +  %y = extractelement <4 x i32> %vec, i32 1
> +  %z = extractelement <4 x i32> %vec, i32 2
> +  %w = extractelement <4 x i32> %vec, i32 3
> +
> +  store i32 %x, i32 addrspace(1)* %out
> +  store i32 %y, i32 addrspace(1)* %out.gep.1
> +  store i32 %z, i32 addrspace(1)* %out.gep.2
> +  store i32 %w, i32 addrspace(1)* %out.gep.3
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_local_store_2_constants_i8:
> +; GCN: ds_write_b8
> +; GCN: ds_write_b8
> +; GCN: s_endpgm
> +define void @merge_local_store_2_constants_i8(i8 addrspace(3)* %out) #0 {
> +  %out.gep.1 = getelementptr i8, i8 addrspace(3)* %out, i32 1
> +
> +  store i8 123, i8 addrspace(3)* %out.gep.1
> +  store i8 456, i8 addrspace(3)* %out, align 2
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_local_store_2_constants_i32:
> +; GCN-DAG: s_movk_i32 [[SLO:s[0-9]+]], 0x1c8
> +; GCN-DAG: s_movk_i32 [[SHI:s[0-9]+]], 0x7b
> +; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], [[SLO]]
> +; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[SHI]]
> +; GCN: ds_write2_b32 v{{[0-9]+}}, v[[LO]], v[[HI]] offset1:1{{$}}
> +define void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
> +
> +  store i32 123, i32 addrspace(3)* %out.gep.1
> +  store i32 456, i32 addrspace(3)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}merge_local_store_4_constants_i32:
> +; GCN: ds_write_b32
> +; GCN: ds_write_b32
> +; GCN: ds_write_b32
> +; GCN: ds_write_b32
> +define void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
> +  %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
> +  %out.gep.2 = getelementptr i32, i32 addrspace(3)* %out, i32 2
> +  %out.gep.3 = getelementptr i32, i32 addrspace(3)* %out, i32 3
> +
> +  store i32 123, i32 addrspace(3)* %out.gep.1
> +  store i32 456, i32 addrspace(3)* %out.gep.2
> +  store i32 333, i32 addrspace(3)* %out.gep.3
> +  store i32 1234, i32 addrspace(3)* %out
> +  ret void
> +}
> +
> +declare void @llvm.AMDGPU.barrier.local() #1
> +
> +attributes #0 = { nounwind }
> +attributes #1 = { noduplicate nounwind }
> 
> Modified: llvm/trunk/test/CodeGen/R600/store.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/store.ll?rev=238108&r1=238107&r2=238108&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/R600/store.ll (original)
> +++ llvm/trunk/test/CodeGen/R600/store.ll Sat May 23 19:51:27 2015
> @@ -355,8 +355,7 @@ attributes #0 = { nounwind "less-precise
>  ; CM: STORE_DWORD
>  ; CM: STORE_DWORD
>  ; CM: STORE_DWORD
> -; SI: buffer_store_dwordx2
> -; SI: buffer_store_dwordx2
> +; SI: buffer_store_dwordx4
>  define void @i128-const-store(i32 addrspace(1)* %out) {
>  entry:
>    store i32 1, i32 addrspace(1)* %out, align 4
> 
> 
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
-------------- next part --------------
; ModuleID = 'input.cl'
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
target triple = "amdgcn--"

; Function Attrs: nounwind
define void @stack_array_read(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture readonly %in) #0 {
entry:
  ; Reproducer: fill a 5-element private (stack) array with the constants
  ; 4..8, then copy stack[in[i]] to out[i] for i = 0..4.  The five
  ; consecutive constant stores below are candidates for store merging;
  ; per the report above, after r238108 they appear to be merged but the
  ; stored values come out as zero vectors — TODO confirm.
  %stack = alloca [5 x i32], align 4
  %0 = bitcast [5 x i32]* %stack to i8*
  ; Mark the 20-byte array live for the duration of the function.
  call void @llvm.lifetime.start(i64 20, i8* %0) #1
  ; Five consecutive 4-byte-aligned constant stores to stack[0..4].
  %arrayidx = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
  store i32 4, i32* %arrayidx, align 4, !tbaa !14
  %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
  store i32 5, i32* %arrayidx1, align 4, !tbaa !14
  %arrayidx2 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 2
  store i32 6, i32* %arrayidx2, align 4, !tbaa !14
  %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 3
  store i32 7, i32* %arrayidx3, align 4, !tbaa !14
  %arrayidx4 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 4
  store i32 8, i32* %arrayidx4, align 4, !tbaa !14
  ; out[0] = stack[in[0]] — dynamic index keeps the array from being
  ; promoted to registers, so the stores above must actually hit memory.
  %1 = load i32, i32 addrspace(1)* %in, align 4, !tbaa !14
  %arrayidx6 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
  %2 = load i32, i32* %arrayidx6, align 4, !tbaa !14
  store i32 %2, i32 addrspace(1)* %out, align 4, !tbaa !14
  ; out[1] = stack[in[1]]
  %arrayidx8 = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 1
  %3 = load i32, i32 addrspace(1)* %arrayidx8, align 4, !tbaa !14
  %arrayidx9 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %3
  %4 = load i32, i32* %arrayidx9, align 4, !tbaa !14
  %arrayidx10 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
  store i32 %4, i32 addrspace(1)* %arrayidx10, align 4, !tbaa !14
  ; out[2] = stack[in[2]]
  %arrayidx11 = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 2
  %5 = load i32, i32 addrspace(1)* %arrayidx11, align 4, !tbaa !14
  %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %5
  %6 = load i32, i32* %arrayidx12, align 4, !tbaa !14
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
  store i32 %6, i32 addrspace(1)* %arrayidx13, align 4, !tbaa !14
  ; out[3] = stack[in[3]]
  %arrayidx14 = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 3
  %7 = load i32, i32 addrspace(1)* %arrayidx14, align 4, !tbaa !14
  %arrayidx15 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %7
  %8 = load i32, i32* %arrayidx15, align 4, !tbaa !14
  %arrayidx16 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
  store i32 %8, i32 addrspace(1)* %arrayidx16, align 4, !tbaa !14
  ; out[4] = stack[in[4]]
  %arrayidx17 = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 4
  %9 = load i32, i32 addrspace(1)* %arrayidx17, align 4, !tbaa !14
  %arrayidx18 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %9
  %10 = load i32, i32* %arrayidx18, align 4, !tbaa !14
  %arrayidx19 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 4
  store i32 %10, i32 addrspace(1)* %arrayidx19, align 4, !tbaa !14
  ; End of the array's lifetime.
  call void @llvm.lifetime.end(i64 20, i8* %0) #1
  ret void
}

; Function Attrs: nounwind
declare void @llvm.lifetime.start(i64, i8* nocapture) #1

; Function Attrs: nounwind
declare void @llvm.lifetime.end(i64, i8* nocapture) #1

attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "target-cpu"="bonaire" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }

!opencl.kernels = !{!0}

!0 = !{void (i32 addrspace(1)*, i32 addrspace(1)*)* @stack_array_read, !1, !2, !3, !4, !5}
!1 = !{!"kernel_arg_addr_space", i32 1, i32 1}
!2 = !{!"kernel_arg_access_qual", !"none", !"none"}
!3 = !{!"kernel_arg_type", !"int*", !"int*"}
!4 = !{!"kernel_arg_base_type", !"int*", !"int*"}
!5 = !{!"kernel_arg_type_qual", !"", !""}
!12 = !{!"clang version 3.7.0 (http://llvm.org/git/clang.git 9a5a6f0e149ba035168641ca6dc4e3b3e5aa29b5) (http://llvm.org/git/llvm.git f44ed0919cbd91fe77aacdfe47701a6d664f1049)"}
!13 = !{!"clang version 3.7.0 (http://llvm.org/git/clang.git 6fd72a7b80dc1053fc03725cfadaf4b77985c806) (http://llvm.org/git/llvm.git 872808e946f3f8be1b30a6672697c2ba8e12f9e1)"}
!14 = !{!15, !15, i64 0}
!15 = !{!"int", !16, i64 0}
!16 = !{!"omnipotent char", !17, i64 0}
!17 = !{!"Simple C/C++ TBAA"}


More information about the llvm-commits mailing list