[PATCH] R600/SI: Fix offset folding in some cases with shifted pointers.

Tom Stellard tom at stellard.net
Fri Aug 15 09:07:30 PDT 2014


On Tue, Aug 05, 2014 at 07:18:37PM +0000, Matt Arsenault wrote:
> Ordinarily (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
> is only done if the add has one use. If the resulting constant
> add can be folded into an addressing mode, force this to happen
> for the pointer operand.
>     
> This ends up happening a lot because of how LDS objects are allocated.
> Since the globals are allocated next to each other, accessing the first
> element of the second object means indexing directly off a shifted pointer.
> 
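For reference, the identity this relies on is just distributing the shift over
the add: (x + c1) << c2 == (x << c2) + (c1 << c2). A minimal standalone sketch
(plain C++, not code from the patch), using c1 = 2 and c2 = 2 as in the LDS
float tests below, where the constant term comes out to 8 and matches the 0x8
DS offsets being checked:

  // Check the distributive identity the combine relies on; the constant
  // term (c1 << c2) is what becomes the DS instruction's immediate offset.
  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t c1 = 2, c2 = 2;
    for (uint32_t x = 0; x < 512; ++x)
      assert(((x + c1) << c2) == ((x << c2) + (c1 << c2)));
    return 0;
  }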

LGTM.

> Depends on:
> http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20140728/229124.html
> 
> http://reviews.llvm.org/D4795
> 
> Files:
>   lib/Target/R600/SIISelLowering.cpp
>   lib/Target/R600/SIISelLowering.h
>   test/CodeGen/R600/shl_add_ptr.ll

> Index: lib/Target/R600/SIISelLowering.cpp
> ===================================================================
> --- lib/Target/R600/SIISelLowering.cpp
> +++ lib/Target/R600/SIISelLowering.cpp
> @@ -233,6 +233,26 @@
>  
>    setTargetDAGCombine(ISD::UINT_TO_FP);
>  
> +  // All memory operations. Some folding on the pointer operand is done to help
> +  // match the constant offsets in the addressing modes.
> +  setTargetDAGCombine(ISD::LOAD);
> +  setTargetDAGCombine(ISD::STORE);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD);
> +  setTargetDAGCombine(ISD::ATOMIC_STORE);
> +  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
> +  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
> +  setTargetDAGCombine(ISD::ATOMIC_SWAP);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
> +  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
> +
>    setSchedulingPreference(Sched::RegPressure);
>  }
>  
> @@ -1251,6 +1271,55 @@
>    return SDValue();
>  }
>  
> +// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
> +
> +// This is a variant of
> +// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
> +//
> +// The normal DAG combiner will do this, but only if the add has one use, since
> +// otherwise it would increase the number of instructions.
> +//
> +// This prevents us from seeing a constant offset that can be folded into a
> +// memory instruction's addressing mode. If we know the resulting add offset of
> +// a pointer can be folded into an addressing offset, we can replace the pointer
> +// operand with the add of the new constant offset. This eliminates one of the uses,
> +// and may allow the remaining use to also be simplified.
> +//
> +SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
> +                                               DAGCombinerInfo &DCI) {
> +  SDValue N0 = N->getOperand(0);
> +  SDValue N1 = N->getOperand(1);
> +
> +  if (N0.getOpcode() != ISD::ADD)
> +    return SDValue();
> +
> +  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
> +  if (!CN1)
> +    return SDValue();
> +
> +  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
> +  if (!CAdd)
> +    return SDValue();
> +
> +  // XXX: Do we need to check the right number of bits depending on the address
> +  // space? I think they are all 16-bit offsets.
> +
> +  // If the resulting offset is > 16 bits, we can't fold it into the addressing
> +  // mode offset.
> +  APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
> +  if (!Offset.isIntN(16))
> +    return SDValue();
> +
> +  SelectionDAG &DAG = DCI.DAG;
> +  SDLoc SL(N);
> +  EVT VT = N->getValueType(0);
> +
> +  SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
> +  SDValue COffset = DAG.getConstant(Offset, MVT::i32);
> +
> +  return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
> +}
> +
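To make the 16-bit cutoff concrete, here is a minimal standalone sketch of the
same Offset computation (it assumes only llvm/ADT/APInt.h and is not part of
the patch): an add constant of 2 shifted left by 2 gives 8 and folds, while
anything whose shifted value needs more than 16 bits fails isIntN(16) and the
combine gives up:

  #include "llvm/ADT/APInt.h"
  #include <cassert>

  int main() {
    // Mirrors the check above: Offset = CAdd << CN1, then isIntN(16).
    llvm::APInt CAdd(32, 2), CN1(32, 2);
    llvm::APInt Offset = CAdd << CN1;            // 2 << 2 == 8
    assert(Offset == 8 && Offset.isIntN(16));    // fits, so it can fold
    // A result needing more than 16 bits would be rejected.
    assert(!(llvm::APInt(32, 1) << llvm::APInt(32, 17)).isIntN(16));
    return 0;
  }
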
>  SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
>                                              DAGCombinerInfo &DCI) const {
>    SelectionDAG &DAG = DCI.DAG;
> @@ -1283,8 +1352,44 @@
>    case ISD::UINT_TO_FP: {
>      return performUCharToFloatCombine(N, DCI);
>    }
> +  case ISD::LOAD:
> +  case ISD::STORE:
> +  case ISD::ATOMIC_LOAD:
> +  case ISD::ATOMIC_STORE:
> +  case ISD::ATOMIC_CMP_SWAP:
> +  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
> +  case ISD::ATOMIC_SWAP:
> +  case ISD::ATOMIC_LOAD_ADD:
> +  case ISD::ATOMIC_LOAD_SUB:
> +  case ISD::ATOMIC_LOAD_AND:
> +  case ISD::ATOMIC_LOAD_OR:
> +  case ISD::ATOMIC_LOAD_XOR:
> +  case ISD::ATOMIC_LOAD_NAND:
> +  case ISD::ATOMIC_LOAD_MIN:
> +  case ISD::ATOMIC_LOAD_MAX:
> +  case ISD::ATOMIC_LOAD_UMIN:
> +  case ISD::ATOMIC_LOAD_UMAX: { // TODO: Target mem intrinsics.
> +    if (DCI.isBeforeLegalize())
> +      break;
> +
> +    MemSDNode *MemNode = cast<MemSDNode>(N);
> +    SDValue Ptr = MemNode->getBasePtr();
> +
> +    if (Ptr.getOpcode() == ISD::SHL &&
> +        MemNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
> +      SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), DCI);
> +      if (NewPtr) {
> +        SmallVector<SDValue, 8> NewOps;
> +        for (unsigned I = 0, N = MemNode->getNumOperands(); I != N; ++I)
> +          NewOps.push_back(MemNode->getOperand(I));
> +
> +        NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
> +        return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0);
> +      }
> +    }
> +    break;
> +  }
>    }
> -
>    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
>  }
>  
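A note on the NewOps index in the hunk above, illustrated with a hypothetical
helper (nothing below is added by the patch): a StoreSDNode's operands are
(chain, value, base pointer, offset), so the pointer lives at index 2, while
loads and the atomic nodes handled here keep the pointer right after the chain
at index 1:

  #include "llvm/CodeGen/ISDOpcodes.h"
  #include <cassert>

  // Hypothetical helper mirroring NewOps[N->getOpcode() == ISD::STORE ? 2 : 1].
  static unsigned pointerOperandIndex(unsigned Opc) {
    return Opc == llvm::ISD::STORE ? 2 : 1;
  }

  int main() {
    assert(pointerOperandIndex(llvm::ISD::STORE) == 2); // chain, value, ptr, ...
    assert(pointerOperandIndex(llvm::ISD::LOAD) == 1);  // chain, ptr, ...
    return 0;
  }
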
> Index: lib/Target/R600/SIISelLowering.h
> ===================================================================
> --- lib/Target/R600/SIISelLowering.h
> +++ lib/Target/R600/SIISelLowering.h
> @@ -56,6 +56,8 @@
>  
>    static SDValue performUCharToFloatCombine(SDNode *N,
>                                              DAGCombinerInfo &DCI);
> +  static SDValue performSHLPtrCombine(SDNode *N,
> +                                      DAGCombinerInfo &DCI);
>  
>  public:
>    SITargetLowering(TargetMachine &tm);
> Index: test/CodeGen/R600/shl_add_ptr.ll
> ===================================================================
> --- /dev/null
> +++ test/CodeGen/R600/shl_add_ptr.ll
> @@ -0,0 +1,268 @@
> +; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
> +
> +; Test that doing a shift of a pointer with a constant add will be
> +; folded into the constant offset addressing mode even if the add has
> +; multiple uses. This is relevant to accessing 2 separate, adjacent
> +; LDS globals.
> +
> +
> +declare i32 @llvm.r600.read.tidig.x() #1
> +
> + at lds0 = addrspace(3) global [512 x float] zeroinitializer, align 4
> + at lds1 = addrspace(3) global [512 x float] zeroinitializer, align 4
> +
> +
> +; Make sure the (add tid, 2) << 2 gets folded into the ds's offset as (tid << 2) + 8
> +
> +; SI-LABEL: @load_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x8, [M0]
> +; SI: S_ENDPGM
> +define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
> +  %val0 = load float addrspace(3)* %arrayidx0, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  store float %val0, float addrspace(1)* %out
> +  ret void
> +}
> +
> +; Make sure once the first use is folded into the addressing mode, the
> +; remaining add use goes through the normal shl + add constant fold.
> +
> +; SI-LABEL: @load_shl_base_lds_1
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_READ_B32 [[RESULT:v[0-9]+]], [[PTR]], 0x8, [M0]
> +; SI: V_ADD_I32_e32 [[ADDUSE:v[0-9]+]], 8, v{{[0-9]+}}
> +; SI-DAG: BUFFER_STORE_DWORD [[RESULT]]
> +; SI-DAG: BUFFER_STORE_DWORD [[ADDUSE]]
> +; SI: S_ENDPGM
> +define void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
> +  %val0 = load float addrspace(3)* %arrayidx0, align 4
> +  %shl_add_use = shl i32 %idx.0, 2
> +  store i32 %shl_add_use, i32 addrspace(1)* %add_use, align 4
> +  store float %val0, float addrspace(1)* %out
> +  ret void
> +}
> +
> +; The two globals are placed adjacent in memory, so the same base
> +; pointer can be used with an offset into the second one.
> +
> +; SI-LABEL: @load_shl_base_lds_2
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI-NEXT: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x100, [M0]
> +; SI-NEXT: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x900, [M0]
> +; SI: S_ENDPGM
> +define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 64
> +  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
> +  %val0 = load float addrspace(3)* %arrayidx0, align 4
> +  %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds1, i32 0, i32 %idx.0
> +  %val1 = load float addrspace(3)* %arrayidx1, align 4
> +  %sum = fadd float %val0, %val1
> +  store float %sum, float addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @store_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_WRITE_B32 [[PTR]], {{v[0-9]+}}, 0x8 [M0]
> +; SI: S_ENDPGM
> +define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
> +  store float 1.0, float addrspace(3)* %arrayidx0, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +
> +; --------------------------------------------------------------------------------
> +; Atomics.
> +
> + at lds2 = addrspace(3) global [512 x i32] zeroinitializer, align 4
> +
> +; define void @atomic_load_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +;   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +;   %idx.0 = add nsw i32 %tid.x, 2
> +;   %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +;   %val = load atomic i32 addrspace(3)* %arrayidx0 seq_cst, align 4
> +;   store i32 %val, i32 addrspace(1)* %out, align 4
> +;   store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +;   ret void
> +; }
> +
> +
> +; SI-LABEL: @atomic_cmpxchg_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_CMPST_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use, i32 %swap) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %pair = cmpxchg i32 addrspace(3)* %arrayidx0, i32 7, i32 %swap seq_cst monotonic
> +  %result = extractvalue { i32, i1 } %pair, 0
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @atomic_swap_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_WRXCHG_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw xchg i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @atomic_add_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_ADD_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw add i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @atomic_sub_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_SUB_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw sub i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @atomic_and_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_AND_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw and i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @atomic_or_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_OR_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw or i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @atomic_xor_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_XOR_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw xor i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; define void @atomic_nand_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +;   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +;   %idx.0 = add nsw i32 %tid.x, 2
> +;   %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +;   %val = atomicrmw nand i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +;   store i32 %val, i32 addrspace(1)* %out, align 4
> +;   store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +;   ret void
> +; }
> +
> +; SI-LABEL: @atomic_min_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_MIN_RTN_I32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw min i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @atomic_max_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_MAX_RTN_I32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw max i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @atomic_umin_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_MIN_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw umin i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @atomic_umax_shl_base_lds_0
> +; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
> +; SI: DS_MAX_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
> +; SI: S_ENDPGM
> +define void @atomic_umax_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
> +  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
> +  %idx.0 = add nsw i32 %tid.x, 2
> +  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
> +  %val = atomicrmw umax i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
> +  store i32 %val, i32 addrspace(1)* %out, align 4
> +  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
> +  ret void
> +}
> +
> +attributes #0 = { nounwind }
> +attributes #1 = { nounwind readnone }
