[PATCH] R600/SI: Replace LDS atomics with no return versions

Tom Stellard tom at stellard.net
Mon Sep 8 06:22:44 PDT 2014


On Sun, Sep 07, 2014 at 12:35:01AM +0000, Matt Arsenault wrote:
> http://reviews.llvm.org/D5233
> 

LGTM.

-Tom

> Files:
>   lib/Target/R600/SIISelLowering.cpp
>   lib/Target/R600/SIInstrInfo.td
>   lib/Target/R600/SIInstructions.td
>   test/CodeGen/R600/atomic_cmp_swap_local.ll
>   test/CodeGen/R600/atomic_load_add.ll
>   test/CodeGen/R600/atomic_load_sub.ll
>   test/CodeGen/R600/local-atomics.ll
>   test/CodeGen/R600/local-atomics64.ll

> Index: lib/Target/R600/SIISelLowering.cpp
> ===================================================================
> --- lib/Target/R600/SIISelLowering.cpp
> +++ lib/Target/R600/SIISelLowering.cpp
> @@ -1943,27 +1943,38 @@
>                                                       SDNode *Node) const {
>    const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
>        getTargetMachine().getSubtargetImpl()->getInstrInfo());
> -  if (!TII->isMIMG(MI->getOpcode()))
> -    return;
>  
> -  unsigned VReg = MI->getOperand(0).getReg();
> -  unsigned Writemask = MI->getOperand(1).getImm();
> -  unsigned BitsSet = 0;
> -  for (unsigned i = 0; i < 4; ++i)
> -    BitsSet += Writemask & (1 << i) ? 1 : 0;
> -
> -  const TargetRegisterClass *RC;
> -  switch (BitsSet) {
> -  default: return;
> -  case 1:  RC = &AMDGPU::VReg_32RegClass; break;
> -  case 2:  RC = &AMDGPU::VReg_64RegClass; break;
> -  case 3:  RC = &AMDGPU::VReg_96RegClass; break;
> +  if (TII->isMIMG(MI->getOpcode())) {
> +    unsigned VReg = MI->getOperand(0).getReg();
> +    unsigned Writemask = MI->getOperand(1).getImm();
> +    unsigned BitsSet = 0;
> +    for (unsigned i = 0; i < 4; ++i)
> +      BitsSet += Writemask & (1 << i) ? 1 : 0;
> +
> +    const TargetRegisterClass *RC;
> +    switch (BitsSet) {
> +    default: return;
> +    case 1:  RC = &AMDGPU::VReg_32RegClass; break;
> +    case 2:  RC = &AMDGPU::VReg_64RegClass; break;
> +    case 3:  RC = &AMDGPU::VReg_96RegClass; break;
> +    }
> +
> +    unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
> +    MI->setDesc(TII->get(NewOpcode));
> +    MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
> +    MRI.setRegClass(VReg, RC);
> +    return;
>    }
>  
> -  unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
> -  MI->setDesc(TII->get(NewOpcode));
> -  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
> -  MRI.setRegClass(VReg, RC);
> +  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI->getOpcode());
> +  if (NoRetAtomicOp != -1) {
> +    if (!Node->hasAnyUseOfValue(0)) {
> +      MI->setDesc(TII->get(NoRetAtomicOp));
> +      MI->RemoveOperand(0);
> +    }
> +
> +    return;
> +  }
>  }
>  
>  MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N,
> Index: lib/Target/R600/SIInstrInfo.td
> ===================================================================
> --- lib/Target/R600/SIInstrInfo.td
> +++ lib/Target/R600/SIInstrInfo.td
> @@ -884,6 +884,8 @@
>    let data1 = 0;
>    let mayStore = 1;
>    let mayLoad = 1;
> +
> +  let hasPostISelHook = 1; // Adjusted to the no-return version.
>  }
>  
>  // 1 address, 2 data.
> @@ -896,6 +898,8 @@
>    AtomicNoRet<noRetOp, 1> {
>    let mayStore = 1;
>    let mayLoad = 1;
> +
> +  let hasPostISelHook = 1; // Adjusted to the no-return version.
>  }
>  
>  // 1 address, 2 data.
> Index: lib/Target/R600/SIInstructions.td
> ===================================================================
> --- lib/Target/R600/SIInstructions.td
> +++ lib/Target/R600/SIInstructions.td
> @@ -758,7 +758,7 @@
>  def DS_OR_RTN_B32 : DS_1A1D_RET <0x2a, "DS_OR_RTN_B32", VReg_32, "DS_OR_B32">;
>  def DS_XOR_RTN_B32 : DS_1A1D_RET <0x2b, "DS_XOR_RTN_B32", VReg_32, "DS_XOR_B32">;
>  def DS_MSKOR_RTN_B32 : DS_1A1D_RET <0x2c, "DS_MSKOR_RTN_B32", VReg_32, "DS_MSKOR_B32">;
> -def DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "DS_WRXCHG_RTN_B32", VReg_32, "DS_WRXCHG_B32">;
> +def DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "DS_WRXCHG_RTN_B32", VReg_32>;
>  //def DS_WRXCHG2_RTN_B32 : DS_2A0D_RET <0x2e, "DS_WRXCHG2_RTN_B32", VReg_32, "DS_WRXCHG2_B32">;
>  //def DS_WRXCHG2ST64_RTN_B32 : DS_2A0D_RET <0x2f, "DS_WRXCHG2_RTN_B32", VReg_32, "DS_WRXCHG2ST64_B32">;
>  def DS_CMPST_RTN_B32 : DS_1A2D_RET <0x30, "DS_CMPST_RTN_B32", VReg_32, "DS_CMPST_B32">;
> Index: test/CodeGen/R600/atomic_cmp_swap_local.ll
> ===================================================================
> --- test/CodeGen/R600/atomic_cmp_swap_local.ll
> +++ test/CodeGen/R600/atomic_cmp_swap_local.ll
> @@ -50,3 +50,36 @@
>    store i32 %result, i32 addrspace(1)* %out, align 4
>    ret void
>  }
> +
> +; FUNC-LABEL: @lds_atomic_cmpxchg_noret_i32_offset:
> +; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
> +; SI: S_LOAD_DWORD [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xa
> +; SI-DAG: V_MOV_B32_e32 [[VCMP:v[0-9]+]], 7
> +; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
> +; SI-DAG: V_MOV_B32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
> +; SI: DS_CMPST_B32 [[VPTR]], [[VCMP]], [[VSWAP]], 0x10, [M0]
> +; SI: S_ENDPGM
> +define void @lds_atomic_cmpxchg_noret_i32_offset(i32 addrspace(3)* %ptr, i32 %swap) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
> +  %result = extractvalue { i32, i1 } %pair, 0
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_cmpxchg_noret_i64_offset:
> +; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
> +; SI: S_LOAD_DWORDX2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
> +; SI: S_MOV_B64  s{{\[}}[[LOSCMP:[0-9]+]]:[[HISCMP:[0-9]+]]{{\]}}, 7
> +; SI-DAG: V_MOV_B32_e32 v[[LOVCMP:[0-9]+]], s[[LOSCMP]]
> +; SI-DAG: V_MOV_B32_e32 v[[HIVCMP:[0-9]+]], s[[HISCMP]]
> +; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
> +; SI-DAG: V_MOV_B32_e32 v[[LOSWAPV:[0-9]+]], s[[LOSWAP]]
> +; SI-DAG: V_MOV_B32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]]
> +; SI: DS_CMPST_B64 [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}}, 0x20, [M0]
> +; SI: S_ENDPGM
> +define void @lds_atomic_cmpxchg_noret_i64_offset(i64 addrspace(3)* %ptr, i64 %swap) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
> +  %result = extractvalue { i64, i1 } %pair, 0
> +  ret void
> +}
> Index: test/CodeGen/R600/atomic_load_add.ll
> ===================================================================
> --- test/CodeGen/R600/atomic_load_add.ll
> +++ test/CodeGen/R600/atomic_load_add.ll
> @@ -3,7 +3,7 @@
>  
>  ; FUNC-LABEL: @atomic_add_local
>  ; R600: LDS_ADD *
> -; SI: DS_ADD_RTN_U32
> +; SI: DS_ADD_U32
>  define void @atomic_add_local(i32 addrspace(3)* %local) {
>     %unused = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
>     ret void
> @@ -11,7 +11,7 @@
>  
>  ; FUNC-LABEL: @atomic_add_local_const_offset
>  ; R600: LDS_ADD *
> -; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
>  define void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
>    %gep = getelementptr i32 addrspace(3)* %local, i32 4
>    %val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
> Index: test/CodeGen/R600/atomic_load_sub.ll
> ===================================================================
> --- test/CodeGen/R600/atomic_load_sub.ll
> +++ test/CodeGen/R600/atomic_load_sub.ll
> @@ -3,7 +3,7 @@
>  
>  ; FUNC-LABEL: @atomic_sub_local
>  ; R600: LDS_SUB *
> -; SI: DS_SUB_RTN_U32
> +; SI: DS_SUB_U32
>  define void @atomic_sub_local(i32 addrspace(3)* %local) {
>     %unused = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
>     ret void
> @@ -11,7 +11,7 @@
>  
>  ; FUNC-LABEL: @atomic_sub_local_const_offset
>  ; R600: LDS_SUB *
> -; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: DS_SUB_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
>  define void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
>    %gep = getelementptr i32 addrspace(3)* %local, i32 4
>    %val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst
> Index: test/CodeGen/R600/local-atomics.ll
> ===================================================================
> --- test/CodeGen/R600/local-atomics.ll
> +++ test/CodeGen/R600/local-atomics.ll
> @@ -279,3 +279,253 @@
>    store i32 %result, i32 addrspace(1)* %out, align 4
>    ret void
>  }
> +
> +; FUNC-LABEL: @lds_atomic_xchg_noret_i32:
> +; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
> +; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
> +; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
> +; SI: DS_WRXCHG_RTN_B32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]], 0x0, [M0]
> +; SI: S_ENDPGM
> +define void @lds_atomic_xchg_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_xchg_noret_i32_offset:
> +; SI: DS_WRXCHG_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; XXX - Is it really necessary to load 4 into VGPR?
> +; FUNC-LABEL: @lds_atomic_add_noret_i32:
> +; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
> +; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
> +; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
> +; SI: DS_ADD_U32 [[VPTR]], [[DATA]], 0x0, [M0]
> +; SI: S_ENDPGM
> +define void @lds_atomic_add_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_add_noret_i32_offset:
> +; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_add_noret_i32_bad_si_offset
> +; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
> +; CI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
> +  %sub = sub i32 %a, %b
> +  %add = add i32 %sub, 4
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 %add
> +  %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_inc_noret_i32:
> +; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
> +; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
> +; SI: DS_INC_U32 v{{[0-9]+}}, [[NEGONE]], 0x0
> +; SI: S_ENDPGM
> +define void @lds_atomic_inc_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_inc_noret_i32_offset:
> +; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
> +; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
> +; SI: DS_INC_U32 v{{[0-9]+}}, [[NEGONE]], 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_inc_noret_i32_bad_si_offset:
> +; SI: DS_INC_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
> +; CI: DS_INC_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_inc_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
> +  %sub = sub i32 %a, %b
> +  %add = add i32 %sub, 4
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 %add
> +  %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_sub_noret_i32:
> +; SI: DS_SUB_U32
> +; SI: S_ENDPGM
> +define void @lds_atomic_sub_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_sub_noret_i32_offset:
> +; SI: DS_SUB_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_dec_noret_i32:
> +; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
> +; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
> +; SI: DS_DEC_U32  v{{[0-9]+}}, [[NEGONE]], 0x0
> +; SI: S_ENDPGM
> +define void @lds_atomic_dec_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 1 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_dec_noret_i32_offset:
> +; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
> +; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
> +; SI: DS_DEC_U32 v{{[0-9]+}}, [[NEGONE]], 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_and_noret_i32:
> +; SI: DS_AND_B32
> +; SI: S_ENDPGM
> +define void @lds_atomic_and_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw and i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_and_noret_i32_offset:
> +; SI: DS_AND_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_or_noret_i32:
> +; SI: DS_OR_B32
> +; SI: S_ENDPGM
> +define void @lds_atomic_or_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw or i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_or_noret_i32_offset:
> +; SI: DS_OR_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_xor_noret_i32:
> +; SI: DS_XOR_B32
> +; SI: S_ENDPGM
> +define void @lds_atomic_xor_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw xor i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_xor_noret_i32_offset:
> +; SI: DS_XOR_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
> +; XFUNC-LABEL: @lds_atomic_nand_noret_i32:
> +; define void @lds_atomic_nand_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +;   %result = atomicrmw nand i32 addrspace(3)* %ptr, i32 4 seq_cst
> +;   ret void
> +; }
> +
> +; FUNC-LABEL: @lds_atomic_min_noret_i32:
> +; SI: DS_MIN_I32
> +; SI: S_ENDPGM
> +define void @lds_atomic_min_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw min i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_min_noret_i32_offset:
> +; SI: DS_MIN_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_max_noret_i32:
> +; SI: DS_MAX_I32
> +; SI: S_ENDPGM
> +define void @lds_atomic_max_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw max i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_max_noret_i32_offset:
> +; SI: DS_MAX_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_umin_noret_i32:
> +; SI: DS_MIN_U32
> +; SI: S_ENDPGM
> +define void @lds_atomic_umin_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_umin_noret_i32_offset:
> +; SI: DS_MIN_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_umax_noret_i32:
> +; SI: DS_MAX_U32
> +; SI: S_ENDPGM
> +define void @lds_atomic_umax_noret_i32(i32 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw umax i32 addrspace(3)* %ptr, i32 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_umax_noret_i32_offset:
> +; SI: DS_MAX_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
> +; SI: S_ENDPGM
> +define void @lds_atomic_umax_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
> +  ret void
> +}
> Index: test/CodeGen/R600/local-atomics64.ll
> ===================================================================
> --- test/CodeGen/R600/local-atomics64.ll
> +++ test/CodeGen/R600/local-atomics64.ll
> @@ -249,3 +249,225 @@
>    store i64 %result, i64 addrspace(1)* %out, align 8
>    ret void
>  }
> +
> +; FUNC-LABEL: @lds_atomic_xchg_noret_i64:
> +; SI: DS_WRXCHG_RTN_B64
> +; SI: S_ENDPGM
> +define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_xchg_noret_i64_offset:
> +; SI: DS_WRXCHG_RTN_B64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_add_noret_i64:
> +; SI: DS_ADD_U64
> +; SI: S_ENDPGM
> +define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_add_noret_i64_offset:
> +; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
> +; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
> +; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
> +; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
> +; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
> +; SI: DS_ADD_U64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}, 0x20, [M0]
> +; SI: S_ENDPGM
> +define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i64 4
> +  %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_inc_noret_i64:
> +; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
> +; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
> +; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
> +; SI: DS_INC_U64 v{{[0-9]+}}, v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
> +; SI: S_ENDPGM
> +define void @lds_atomic_inc_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_inc_noret_i64_offset:
> +; SI: DS_INC_U64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_sub_noret_i64:
> +; SI: DS_SUB_U64
> +; SI: S_ENDPGM
> +define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_sub_noret_i64_offset:
> +; SI: DS_SUB_U64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_dec_noret_i64:
> +; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
> +; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
> +; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
> +; SI: DS_DEC_U64 v{{[0-9]+}}, v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
> +; SI: S_ENDPGM
> +define void @lds_atomic_dec_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_dec_noret_i64_offset:
> +; SI: DS_DEC_U64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_and_noret_i64:
> +; SI: DS_AND_B64
> +; SI: S_ENDPGM
> +define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_and_noret_i64_offset:
> +; SI: DS_AND_B64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_or_noret_i64:
> +; SI: DS_OR_B64
> +; SI: S_ENDPGM
> +define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_or_noret_i64_offset:
> +; SI: DS_OR_B64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_xor_noret_i64:
> +; SI: DS_XOR_B64
> +; SI: S_ENDPGM
> +define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_xor_noret_i64_offset:
> +; SI: DS_XOR_B64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
> +; XFUNC-LABEL: @lds_atomic_nand_noret_i64:
> +; define void @lds_atomic_nand_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +;   %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
> +;   ret void
> +; }
> +
> +; FUNC-LABEL: @lds_atomic_min_noret_i64:
> +; SI: DS_MIN_I64
> +; SI: S_ENDPGM
> +define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_min_noret_i64_offset:
> +; SI: DS_MIN_I64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_max_noret_i64:
> +; SI: DS_MAX_I64
> +; SI: S_ENDPGM
> +define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_max_noret_i64_offset:
> +; SI: DS_MAX_I64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_umin_noret_i64:
> +; SI: DS_MIN_U64
> +; SI: S_ENDPGM
> +define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_umin_noret_i64_offset:
> +; SI: DS_MIN_U64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_umax_noret_i64:
> +; SI: DS_MAX_U64
> +; SI: S_ENDPGM
> +define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
> +  %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
> +  ret void
> +}
> +
> +; FUNC-LABEL: @lds_atomic_umax_noret_i64_offset:
> +; SI: DS_MAX_U64 {{.*}} 0x20
> +; SI: S_ENDPGM
> +define void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
> +  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
> +  %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
> +  ret void
> +}
