[llvm] r217379 - R600/SI: Replace LDS atomics with no return versions

Matt Arsenault Matthew.Arsenault at amd.com
Mon Sep 8 08:07:31 PDT 2014


Author: arsenm
Date: Mon Sep  8 10:07:31 2014
New Revision: 217379

URL: http://llvm.org/viewvc/llvm-project?rev=217379&view=rev
Log:
R600/SI: Replace LDS atomics with no return versions

Modified:
    llvm/trunk/lib/Target/R600/SIISelLowering.cpp
    llvm/trunk/lib/Target/R600/SIInstrInfo.td
    llvm/trunk/lib/Target/R600/SIInstructions.td
    llvm/trunk/test/CodeGen/R600/atomic_cmp_swap_local.ll
    llvm/trunk/test/CodeGen/R600/atomic_load_add.ll
    llvm/trunk/test/CodeGen/R600/atomic_load_sub.ll
    llvm/trunk/test/CodeGen/R600/local-atomics.ll
    llvm/trunk/test/CodeGen/R600/local-atomics64.ll

Modified: llvm/trunk/lib/Target/R600/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIISelLowering.cpp?rev=217379&r1=217378&r2=217379&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIISelLowering.cpp Mon Sep  8 10:07:31 2014
@@ -1943,27 +1943,39 @@ void SITargetLowering::AdjustInstrPostIn
                                                      SDNode *Node) const {
   const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
       getTargetMachine().getSubtargetImpl()->getInstrInfo());
-  if (!TII->isMIMG(MI->getOpcode()))
-    return;
 
-  unsigned VReg = MI->getOperand(0).getReg();
-  unsigned Writemask = MI->getOperand(1).getImm();
-  unsigned BitsSet = 0;
-  for (unsigned i = 0; i < 4; ++i)
-    BitsSet += Writemask & (1 << i) ? 1 : 0;
-
-  const TargetRegisterClass *RC;
-  switch (BitsSet) {
-  default: return;
-  case 1:  RC = &AMDGPU::VReg_32RegClass; break;
-  case 2:  RC = &AMDGPU::VReg_64RegClass; break;
-  case 3:  RC = &AMDGPU::VReg_96RegClass; break;
+  if (TII->isMIMG(MI->getOpcode())) {
+    unsigned VReg = MI->getOperand(0).getReg();
+    unsigned Writemask = MI->getOperand(1).getImm();
+    unsigned BitsSet = 0;
+    for (unsigned i = 0; i < 4; ++i)
+      BitsSet += Writemask & (1 << i) ? 1 : 0;
+
+    const TargetRegisterClass *RC;
+    switch (BitsSet) {
+    default: return;
+    case 1:  RC = &AMDGPU::VReg_32RegClass; break;
+    case 2:  RC = &AMDGPU::VReg_64RegClass; break;
+    case 3:  RC = &AMDGPU::VReg_96RegClass; break;
+    }
+
+    unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
+    MI->setDesc(TII->get(NewOpcode));
+    MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+    MRI.setRegClass(VReg, RC);
+    return;
   }
 
-  unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
-  MI->setDesc(TII->get(NewOpcode));
-  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
-  MRI.setRegClass(VReg, RC);
+  // Replace unused atomics with the no return version.
+  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI->getOpcode());
+  if (NoRetAtomicOp != -1) {
+    if (!Node->hasAnyUseOfValue(0)) {
+      MI->setDesc(TII->get(NoRetAtomicOp));
+      MI->RemoveOperand(0);
+    }
+
+    return;
+  }
 }
 
 MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N,

Modified: llvm/trunk/lib/Target/R600/SIInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIInstrInfo.td?rev=217379&r1=217378&r2=217379&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/SIInstrInfo.td (original)
+++ llvm/trunk/lib/Target/R600/SIInstrInfo.td Mon Sep  8 10:07:31 2014
@@ -884,6 +884,8 @@ class DS_1A1D_RET <bits<8> op, string as
   let data1 = 0;
   let mayStore = 1;
   let mayLoad = 1;
+
+  let hasPostISelHook = 1; // Adjusted to no return version.
 }
 
 // 1 address, 2 data.
@@ -896,6 +898,8 @@ class DS_1A2D_RET <bits<8> op, string as
   AtomicNoRet<noRetOp, 1> {
   let mayStore = 1;
   let mayLoad = 1;
+
+  let hasPostISelHook = 1; // Adjusted to no return version.
 }
 
 // 1 address, 2 data.

Modified: llvm/trunk/lib/Target/R600/SIInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIInstructions.td?rev=217379&r1=217378&r2=217379&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/SIInstructions.td (original)
+++ llvm/trunk/lib/Target/R600/SIInstructions.td Mon Sep  8 10:07:31 2014
@@ -758,7 +758,7 @@ def DS_AND_RTN_B32 : DS_1A1D_RET <0x29,
 def DS_OR_RTN_B32 : DS_1A1D_RET <0x2a, "DS_OR_RTN_B32", VReg_32, "DS_OR_B32">;
 def DS_XOR_RTN_B32 : DS_1A1D_RET <0x2b, "DS_XOR_RTN_B32", VReg_32, "DS_XOR_B32">;
 def DS_MSKOR_RTN_B32 : DS_1A1D_RET <0x2c, "DS_MSKOR_RTN_B32", VReg_32, "DS_MSKOR_B32">;
-def DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "DS_WRXCHG_RTN_B32", VReg_32, "DS_WRXCHG_B32">;
+def DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "DS_WRXCHG_RTN_B32", VReg_32>;
 //def DS_WRXCHG2_RTN_B32 : DS_2A0D_RET <0x2e, "DS_WRXCHG2_RTN_B32", VReg_32, "DS_WRXCHG2_B32">;
 //def DS_WRXCHG2ST64_RTN_B32 : DS_2A0D_RET <0x2f, "DS_WRXCHG2_RTN_B32", VReg_32, "DS_WRXCHG2ST64_B32">;
 def DS_CMPST_RTN_B32 : DS_1A2D_RET <0x30, "DS_CMPST_RTN_B32", VReg_32, "DS_CMPST_B32">;

Modified: llvm/trunk/test/CodeGen/R600/atomic_cmp_swap_local.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/atomic_cmp_swap_local.ll?rev=217379&r1=217378&r2=217379&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/atomic_cmp_swap_local.ll (original)
+++ llvm/trunk/test/CodeGen/R600/atomic_cmp_swap_local.ll Mon Sep  8 10:07:31 2014
@@ -50,3 +50,36 @@ define void @lds_atomic_cmpxchg_ret_i32_
   store i32 %result, i32 addrspace(1)* %out, align 4
   ret void
 }
+
+; FUNC-LABEL: @lds_atomic_cmpxchg_noret_i32_offset:
+; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
+; SI: S_LOAD_DWORD [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xa
+; SI-DAG: V_MOV_B32_e32 [[VCMP:v[0-9]+]], 7
+; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; SI-DAG: V_MOV_B32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
+; SI: DS_CMPST_B32 [[VPTR]], [[VCMP]], [[VSWAP]], 0x10, [M0]
+; SI: S_ENDPGM
+define void @lds_atomic_cmpxchg_noret_i32_offset(i32 addrspace(3)* %ptr, i32 %swap) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
+  %result = extractvalue { i32, i1 } %pair, 0
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_cmpxchg_noret_i64_offset:
+; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
+; SI: S_LOAD_DWORDX2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: S_MOV_B64  s{{\[}}[[LOSCMP:[0-9]+]]:[[HISCMP:[0-9]+]]{{\]}}, 7
+; SI-DAG: V_MOV_B32_e32 v[[LOVCMP:[0-9]+]], s[[LOSCMP]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVCMP:[0-9]+]], s[[HISCMP]]
+; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; SI-DAG: V_MOV_B32_e32 v[[LOSWAPV:[0-9]+]], s[[LOSWAP]]
+; SI-DAG: V_MOV_B32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]]
+; SI: DS_CMPST_B64 [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}}, 0x20, [M0]
+; SI: S_ENDPGM
+define void @lds_atomic_cmpxchg_noret_i64_offset(i64 addrspace(3)* %ptr, i64 %swap) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
+  %result = extractvalue { i64, i1 } %pair, 0
+  ret void
+}

Modified: llvm/trunk/test/CodeGen/R600/atomic_load_add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/atomic_load_add.ll?rev=217379&r1=217378&r2=217379&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/atomic_load_add.ll (original)
+++ llvm/trunk/test/CodeGen/R600/atomic_load_add.ll Mon Sep  8 10:07:31 2014
@@ -3,7 +3,7 @@
 
 ; FUNC-LABEL: @atomic_add_local
 ; R600: LDS_ADD *
-; SI: DS_ADD_RTN_U32
+; SI: DS_ADD_U32
 define void @atomic_add_local(i32 addrspace(3)* %local) {
    %unused = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
    ret void
@@ -11,7 +11,7 @@ define void @atomic_add_local(i32 addrsp
 
 ; FUNC-LABEL: @atomic_add_local_const_offset
 ; R600: LDS_ADD *
-; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
 define void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
   %gep = getelementptr i32 addrspace(3)* %local, i32 4
   %val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst

Modified: llvm/trunk/test/CodeGen/R600/atomic_load_sub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/atomic_load_sub.ll?rev=217379&r1=217378&r2=217379&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/atomic_load_sub.ll (original)
+++ llvm/trunk/test/CodeGen/R600/atomic_load_sub.ll Mon Sep  8 10:07:31 2014
@@ -3,7 +3,7 @@
 
 ; FUNC-LABEL: @atomic_sub_local
 ; R600: LDS_SUB *
-; SI: DS_SUB_RTN_U32
+; SI: DS_SUB_U32
 define void @atomic_sub_local(i32 addrspace(3)* %local) {
    %unused = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
    ret void
@@ -11,7 +11,7 @@ define void @atomic_sub_local(i32 addrsp
 
 ; FUNC-LABEL: @atomic_sub_local_const_offset
 ; R600: LDS_SUB *
-; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: DS_SUB_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
 define void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
   %gep = getelementptr i32 addrspace(3)* %local, i32 4
   %val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst

Modified: llvm/trunk/test/CodeGen/R600/local-atomics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/local-atomics.ll?rev=217379&r1=217378&r2=217379&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/local-atomics.ll (original)
+++ llvm/trunk/test/CodeGen/R600/local-atomics.ll Mon Sep  8 10:07:31 2014
@@ -279,3 +279,253 @@ define void @lds_atomic_umax_ret_i32_off
   store i32 %result, i32 addrspace(1)* %out, align 4
   ret void
 }
+
+; FUNC-LABEL: @lds_atomic_xchg_noret_i32:
+; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
+; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
+; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; SI: DS_WRXCHG_RTN_B32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]], 0x0, [M0]
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xchg_noret_i32_offset:
+; SI: DS_WRXCHG_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; XXX - Is it really necessary to load 4 into VGPR?
+; FUNC-LABEL: @lds_atomic_add_noret_i32:
+; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
+; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
+; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; SI: DS_ADD_U32 [[VPTR]], [[DATA]], 0x0, [M0]
+; SI: S_ENDPGM
+define void @lds_atomic_add_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_noret_i32_offset:
+; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_noret_i32_bad_si_offset
+; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
+; CI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
+  %sub = sub i32 %a, %b
+  %add = add i32 %sub, 4
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 %add
+  %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_noret_i32:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_INC_U32 v{{[0-9]+}}, [[NEGONE]], 0x0
+; SI: S_ENDPGM
+define void @lds_atomic_inc_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_noret_i32_offset:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_INC_U32 v{{[0-9]+}}, [[NEGONE]], 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_noret_i32_bad_si_offset:
+; SI: DS_INC_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
+; CI: DS_INC_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_inc_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
+  %sub = sub i32 %a, %b
+  %add = add i32 %sub, 4
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 %add
+  %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_noret_i32:
+; SI: DS_SUB_U32
+; SI: S_ENDPGM
+define void @lds_atomic_sub_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_noret_i32_offset:
+; SI: DS_SUB_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_noret_i32:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_DEC_U32  v{{[0-9]+}}, [[NEGONE]], 0x0
+; SI: S_ENDPGM
+define void @lds_atomic_dec_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_noret_i32_offset:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_DEC_U32 v{{[0-9]+}}, [[NEGONE]], 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_noret_i32:
+; SI: DS_AND_B32
+; SI: S_ENDPGM
+define void @lds_atomic_and_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw and i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_noret_i32_offset:
+; SI: DS_AND_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_noret_i32:
+; SI: DS_OR_B32
+; SI: S_ENDPGM
+define void @lds_atomic_or_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw or i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_noret_i32_offset:
+; SI: DS_OR_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_noret_i32:
+; SI: DS_XOR_B32
+; SI: S_ENDPGM
+define void @lds_atomic_xor_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw xor i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_noret_i32_offset:
+; SI: DS_XOR_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
+; XFUNC-LABEL: @lds_atomic_nand_noret_i32:
+; define void @lds_atomic_nand_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+;   %result = atomicrmw nand i32 addrspace(3)* %ptr, i32 4 seq_cst
+;   ret void
+; }
+
+; FUNC-LABEL: @lds_atomic_min_noret_i32:
+; SI: DS_MIN_I32
+; SI: S_ENDPGM
+define void @lds_atomic_min_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw min i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_min_noret_i32_offset:
+; SI: DS_MIN_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_noret_i32:
+; SI: DS_MAX_I32
+; SI: S_ENDPGM
+define void @lds_atomic_max_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw max i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_noret_i32_offset:
+; SI: DS_MAX_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_noret_i32:
+; SI: DS_MIN_U32
+; SI: S_ENDPGM
+define void @lds_atomic_umin_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_noret_i32_offset:
+; SI: DS_MIN_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_noret_i32:
+; SI: DS_MAX_U32
+; SI: S_ENDPGM
+define void @lds_atomic_umax_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw umax i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_noret_i32_offset:
+; SI: DS_MAX_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_umax_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}

Modified: llvm/trunk/test/CodeGen/R600/local-atomics64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/local-atomics64.ll?rev=217379&r1=217378&r2=217379&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/local-atomics64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/local-atomics64.ll Mon Sep  8 10:07:31 2014
@@ -249,3 +249,225 @@ define void @lds_atomic_umax_ret_i64_off
   store i64 %result, i64 addrspace(1)* %out, align 8
   ret void
 }
+
+; FUNC-LABEL: @lds_atomic_xchg_noret_i64:
+; SI: DS_WRXCHG_RTN_B64
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xchg_noret_i64_offset:
+; SI: DS_WRXCHG_RTN_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_noret_i64:
+; SI: DS_ADD_U64
+; SI: S_ENDPGM
+define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_noret_i64_offset:
+; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
+; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
+; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
+; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; SI: DS_ADD_U64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}, 0x20, [M0]
+; SI: S_ENDPGM
+define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i64 4
+  %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_noret_i64:
+; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
+; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
+; SI: DS_INC_U64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
+; SI: S_ENDPGM
+define void @lds_atomic_inc_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_noret_i64_offset:
+; SI: DS_INC_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_noret_i64:
+; SI: DS_SUB_U64
+; SI: S_ENDPGM
+define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_noret_i64_offset:
+; SI: DS_SUB_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_noret_i64:
+; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
+; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
+; SI: DS_DEC_U64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
+; SI: S_ENDPGM
+define void @lds_atomic_dec_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_noret_i64_offset:
+; SI: DS_DEC_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_noret_i64:
+; SI: DS_AND_B64
+; SI: S_ENDPGM
+define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_noret_i64_offset:
+; SI: DS_AND_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_noret_i64:
+; SI: DS_OR_B64
+; SI: S_ENDPGM
+define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_noret_i64_offset:
+; SI: DS_OR_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_noret_i64:
+; SI: DS_XOR_B64
+; SI: S_ENDPGM
+define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_noret_i64_offset:
+; SI: DS_XOR_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
+  ret void
+}
+
+; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
+; XFUNC-LABEL: @lds_atomic_nand_noret_i64:
+; define void @lds_atomic_nand_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+;   %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
+;   ret void
+; }
+
+; FUNC-LABEL: @lds_atomic_min_noret_i64:
+; SI: DS_MIN_I64
+; SI: S_ENDPGM
+define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_min_noret_i64_offset:
+; SI: DS_MIN_I64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_noret_i64:
+; SI: DS_MAX_I64
+; SI: S_ENDPGM
+define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_noret_i64_offset:
+; SI: DS_MAX_I64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_noret_i64:
+; SI: DS_MIN_U64
+; SI: S_ENDPGM
+define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_noret_i64_offset:
+; SI: DS_MIN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_noret_i64:
+; SI: DS_MAX_U64
+; SI: S_ENDPGM
+define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_noret_i64_offset:
+; SI: DS_MAX_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
+  ret void
+}





More information about the llvm-commits mailing list