[llvm] r351851 - Codegen support for atomicrmw fadd/fsub

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 22 10:36:07 PST 2019


Author: arsenm
Date: Tue Jan 22 10:36:06 2019
New Revision: 351851

URL: http://llvm.org/viewvc/llvm-project?rev=351851&view=rev
Log:
Codegen support for atomicrmw fadd/fsub

Added:
    llvm/trunk/test/CodeGen/AMDGPU/local-atomics-fp.ll
    llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
    llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
    llvm/trunk/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
Modified:
    llvm/trunk/include/llvm/CodeGen/ISDOpcodes.h
    llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h
    llvm/trunk/include/llvm/Target/TargetSelectionDAG.td
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp

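For context: atomicrmw fadd/fsub already exist in the IR; this commit adds the selection path for them. SelectionDAGBuilder now produces the new ISD::ATOMIC_LOAD_FADD/FSUB opcodes, AMDGPU selects 32-bit LDS fadd to the native ds_add_f32/ds_add_rtn_f32 instructions on subtargets that have them, and most other cases (X86 across the board) are expanded by AtomicExpand into a cmpxchg loop, with 16-bit cases left untouched on AMDGPU for now. A minimal producer-side sketch of how such IR is built, assuming the C++ IRBuilder API as it existed around this revision (the tool and function names are illustrative, not part of the patch):

// Producer-side sketch (illustrative): building the kind of IR this commit
// teaches the backend to select.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("atomic-fp-example", Ctx);

  // define float @fadd_lds(float addrspace(3)* %ptr, float %v)
  Type *FloatTy = Type::getFloatTy(Ctx);
  PointerType *LDSPtrTy = PointerType::get(FloatTy, /*AddressSpace=*/3);
  Function *F = Function::Create(
      FunctionType::get(FloatTy, {LDSPtrTy, FloatTy}, /*isVarArg=*/false),
      Function::ExternalLinkage, "fadd_lds", &M);

  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  auto AI = F->arg_begin();
  Value *Ptr = &*AI++;
  Value *Val = &*AI;

  // %old = atomicrmw fadd float addrspace(3)* %ptr, float %v seq_cst
  Value *Old = B.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val,
                                 AtomicOrdering::SequentiallyConsistent);
  B.CreateRet(Old);

  M.print(outs(), nullptr);
  return 0;
}
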
Modified: llvm/trunk/include/llvm/CodeGen/ISDOpcodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/ISDOpcodes.h?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/ISDOpcodes.h (original)
+++ llvm/trunk/include/llvm/CodeGen/ISDOpcodes.h Tue Jan 22 10:36:06 2019
@@ -818,6 +818,8 @@ namespace ISD {
     ATOMIC_LOAD_MAX,
     ATOMIC_LOAD_UMIN,
     ATOMIC_LOAD_UMAX,
+    ATOMIC_LOAD_FADD,
+    ATOMIC_LOAD_FSUB,
 
     // Masked load and store - consecutive vector load and store operations
     // with additional mask operand that prevents memory accesses to the

Modified: llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h (original)
+++ llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h Tue Jan 22 10:36:06 2019
@@ -1359,6 +1359,8 @@ public:
            N->getOpcode() == ISD::ATOMIC_LOAD_MAX     ||
            N->getOpcode() == ISD::ATOMIC_LOAD_UMIN    ||
            N->getOpcode() == ISD::ATOMIC_LOAD_UMAX    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_FADD    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_FSUB    ||
            N->getOpcode() == ISD::ATOMIC_LOAD         ||
            N->getOpcode() == ISD::ATOMIC_STORE        ||
            N->getOpcode() == ISD::MLOAD               ||
@@ -1411,6 +1413,8 @@ public:
            N->getOpcode() == ISD::ATOMIC_LOAD_MAX     ||
            N->getOpcode() == ISD::ATOMIC_LOAD_UMIN    ||
            N->getOpcode() == ISD::ATOMIC_LOAD_UMAX    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_FADD    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_FSUB    ||
            N->getOpcode() == ISD::ATOMIC_LOAD         ||
            N->getOpcode() == ISD::ATOMIC_STORE;
   }

Modified: llvm/trunk/include/llvm/Target/TargetSelectionDAG.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetSelectionDAG.td?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Target/TargetSelectionDAG.td (original)
+++ llvm/trunk/include/llvm/Target/TargetSelectionDAG.td Tue Jan 22 10:36:06 2019
@@ -264,6 +264,11 @@ def SDTAtomic3 : SDTypeProfile<1, 3, [
 def SDTAtomic2 : SDTypeProfile<1, 2, [
   SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
 ]>;
+
+def SDTFPAtomic2 : SDTypeProfile<1, 2, [
+  SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
+]>;
+
 def SDTAtomicStore : SDTypeProfile<0, 2, [
   SDTCisPtrTy<0>, SDTCisInt<1>
 ]>;
@@ -510,6 +515,11 @@ def atomic_load_umin : SDNode<"ISD::ATOM
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_fadd : SDNode<"ISD::ATOMIC_LOAD_FADD" , SDTFPAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_fsub : SDNode<"ISD::ATOMIC_LOAD_FSUB" , SDTFPAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+
 def atomic_load      : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
                     [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_store     : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Tue Jan 22 10:36:06 2019
@@ -6464,6 +6464,8 @@ SDValue SelectionDAG::getAtomic(unsigned
           Opcode == ISD::ATOMIC_LOAD_MAX ||
           Opcode == ISD::ATOMIC_LOAD_UMIN ||
           Opcode == ISD::ATOMIC_LOAD_UMAX ||
+          Opcode == ISD::ATOMIC_LOAD_FADD ||
+          Opcode == ISD::ATOMIC_LOAD_FSUB ||
           Opcode == ISD::ATOMIC_SWAP ||
           Opcode == ISD::ATOMIC_STORE) &&
          "Invalid Atomic Op");

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Tue Jan 22 10:36:06 2019
@@ -4204,6 +4204,8 @@ void SelectionDAGBuilder::visitAtomicRMW
   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
+  case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
+  case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
   }
   AtomicOrdering Order = I.getOrdering();
   SyncScope::ID SSID = I.getSyncScopeID();

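The hunk above is the generic IR-to-DAG mapping; the target-specific work then happens on the new node. As a consumer-side sketch (illustrative names, not from the patch), a target lowering can produce the generic node with the same SelectionDAG::getAtomic overload that the SITargetLowering change further down uses:

// Sketch of lowering a target memory intrinsic to the new generic node,
// mirroring the SITargetLowering::LowerINTRINSIC_W_CHAIN change below.
// lowerFPAtomicRMW is a hypothetical helper name.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

static SDValue lowerFPAtomicRMW(SDValue Op, SelectionDAG &DAG) {
  MemSDNode *M = cast<MemSDNode>(Op);
  // Operands of the intrinsic node: 0 = chain, 1 = intrinsic ID,
  // 2 = pointer, 3 = value to add.
  return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, SDLoc(Op), M->getMemoryVT(),
                       M->getOperand(0), M->getOperand(2), M->getOperand(3),
                       M->getMemOperand());
}
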
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp Tue Jan 22 10:36:06 2019
@@ -95,6 +95,7 @@ std::string SDNode::getOperationName(con
   case ISD::ATOMIC_LOAD_MAX:            return "AtomicLoadMax";
   case ISD::ATOMIC_LOAD_UMIN:           return "AtomicLoadUMin";
   case ISD::ATOMIC_LOAD_UMAX:           return "AtomicLoadUMax";
+  case ISD::ATOMIC_LOAD_FADD:           return "AtomicLoadFAdd";
   case ISD::ATOMIC_LOAD:                return "AtomicLoad";
   case ISD::ATOMIC_STORE:               return "AtomicStore";
   case ISD::PCMARKER:                   return "PCMarker";

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp Tue Jan 22 10:36:06 2019
@@ -469,7 +469,7 @@ void AMDGPUDAGToDAGISel::Select(SDNode *
 
   if (isa<AtomicSDNode>(N) ||
       (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
-       Opc == AMDGPUISD::ATOMIC_LOAD_FADD ||
+       Opc == ISD::ATOMIC_LOAD_FADD ||
        Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
        Opc == AMDGPUISD::ATOMIC_LOAD_FMAX))
     N = glueCopyToM0(N);

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp Tue Jan 22 10:36:06 2019
@@ -4194,7 +4194,6 @@ const char* AMDGPUTargetLowering::getTar
   NODE_NAME_CASE(ATOMIC_CMP_SWAP)
   NODE_NAME_CASE(ATOMIC_INC)
   NODE_NAME_CASE(ATOMIC_DEC)
-  NODE_NAME_CASE(ATOMIC_LOAD_FADD)
   NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
   NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
   NODE_NAME_CASE(BUFFER_LOAD)
@@ -4518,7 +4517,12 @@ bool AMDGPUTargetLowering::isKnownNeverN
 
 TargetLowering::AtomicExpansionKind
 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
-  if (RMW->getOperation() == AtomicRMWInst::Nand)
+  switch (RMW->getOperation()) {
+  case AtomicRMWInst::Nand:
+  case AtomicRMWInst::FAdd:
+  case AtomicRMWInst::FSub:
     return AtomicExpansionKind::CmpXChg;
-  return AtomicExpansionKind::None;
+  default:
+    return AtomicExpansionKind::None;
+  }
 }

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h Tue Jan 22 10:36:06 2019
@@ -477,7 +477,6 @@ enum NodeType : unsigned {
   ATOMIC_CMP_SWAP,
   ATOMIC_INC,
   ATOMIC_DEC,
-  ATOMIC_LOAD_FADD,
   ATOMIC_LOAD_FMIN,
   ATOMIC_LOAD_FMAX,
   BUFFER_LOAD,

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h Tue Jan 22 10:36:06 2019
@@ -791,6 +791,9 @@ public:
     return HasScalarAtomics;
   }
 
+  bool hasLDSFPAtomics() const {
+    return VIInsts;
+  }
 
   bool hasDPP() const {
     return HasDPP;

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Tue Jan 22 10:36:06 2019
@@ -699,6 +699,7 @@ SITargetLowering::SITargetLowering(const
   setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
+  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);
 
   setSchedulingPreference(Sched::RegPressure);
 
@@ -5491,9 +5492,21 @@ SDValue SITargetLowering::LowerINTRINSIC
                                    M->getVTList(), Ops, M->getMemoryVT(),
                                    M->getMemOperand());
   }
+  case Intrinsic::amdgcn_ds_fadd: {
+    MemSDNode *M = cast<MemSDNode>(Op);
+    unsigned Opc;
+    switch (IntrID) {
+    case Intrinsic::amdgcn_ds_fadd:
+      Opc = ISD::ATOMIC_LOAD_FADD;
+      break;
+    }
+
+    return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
+                         M->getOperand(0), M->getOperand(2), M->getOperand(3),
+                         M->getMemOperand());
+  }
   case Intrinsic::amdgcn_atomic_inc:
   case Intrinsic::amdgcn_atomic_dec:
-  case Intrinsic::amdgcn_ds_fadd:
   case Intrinsic::amdgcn_ds_fmin:
   case Intrinsic::amdgcn_ds_fmax: {
     MemSDNode *M = cast<MemSDNode>(Op);
@@ -5505,9 +5518,6 @@ SDValue SITargetLowering::LowerINTRINSIC
     case Intrinsic::amdgcn_atomic_dec:
       Opc = AMDGPUISD::ATOMIC_DEC;
       break;
-    case Intrinsic::amdgcn_ds_fadd:
-      Opc = AMDGPUISD::ATOMIC_LOAD_FADD;
-      break;
     case Intrinsic::amdgcn_ds_fmin:
       Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
       break;
@@ -8926,11 +8936,11 @@ SDValue SITargetLowering::PerformDAGComb
   case ISD::ATOMIC_LOAD_MAX:
   case ISD::ATOMIC_LOAD_UMIN:
   case ISD::ATOMIC_LOAD_UMAX:
+  case ISD::ATOMIC_LOAD_FADD:
   case AMDGPUISD::ATOMIC_INC:
   case AMDGPUISD::ATOMIC_DEC:
-  case AMDGPUISD::ATOMIC_LOAD_FADD:
   case AMDGPUISD::ATOMIC_LOAD_FMIN:
-  case AMDGPUISD::ATOMIC_LOAD_FMAX:  // TODO: Target mem intrinsics.
+  case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
     if (DCI.isBeforeLegalize())
       break;
     return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
@@ -9722,3 +9732,29 @@ bool SITargetLowering::isKnownNeverNaNFo
   return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
                                                             SNaN, Depth);
 }
+
+TargetLowering::AtomicExpansionKind
+SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
+  switch (RMW->getOperation()) {
+  case AtomicRMWInst::FAdd: {
+    Type *Ty = RMW->getType();
+
+    // We don't have a way to support 16-bit atomics now, so just leave them
+    // as-is.
+    if (Ty->isHalfTy())
+      return AtomicExpansionKind::None;
+
+    if (!Ty->isFloatTy())
+      return AtomicExpansionKind::CmpXChg;
+
+    // TODO: Do have these for flat. Older targets also had them for buffers.
+    unsigned AS = RMW->getPointerAddressSpace();
+    return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
+      AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
+  }
+  default:
+    break;
+  }
+
+  return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
+}

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h Tue Jan 22 10:36:06 2019
@@ -351,6 +351,7 @@ public:
                                     const SelectionDAG &DAG,
                                     bool SNaN = false,
                                     unsigned Depth = 0) const override;
+  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
 };
 
 } // End namespace llvm

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td Tue Jan 22 10:36:06 2019
@@ -61,10 +61,6 @@ def SDTAtomic2_f32 : SDTypeProfile<1, 2,
   SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
 ]>;
 
-def SIatomic_fadd : SDNode<"AMDGPUISD::ATOMIC_LOAD_FADD", SDTAtomic2_f32,
-  [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
->;
-
 def SIatomic_fmin : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMIN", SDTAtomic2_f32,
   [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
 >;
@@ -232,7 +228,7 @@ defm atomic_dec_global : global_binary_a
 
 def atomic_inc_local : local_binary_atomic_op<SIatomic_inc>;
 def atomic_dec_local : local_binary_atomic_op<SIatomic_dec>;
-def atomic_load_fadd_local : local_binary_atomic_op<SIatomic_fadd>;
+def atomic_load_fadd_local : local_binary_atomic_op<atomic_load_fadd>;
 def atomic_load_fmin_local : local_binary_atomic_op<SIatomic_fmin>;
 def atomic_load_fmax_local : local_binary_atomic_op<SIatomic_fmax>;
 
@@ -428,7 +424,7 @@ defm atomic_load_xor : SIAtomicM0Glue2 <
 defm atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
 defm atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
 defm atomic_swap : SIAtomicM0Glue2 <"SWAP">;
-defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 1, SDTAtomic2_f32>;
+defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 0, SDTAtomic2_f32>;
 defm atomic_load_fmin : SIAtomicM0Glue2 <"LOAD_FMIN", 1, SDTAtomic2_f32>;
 defm atomic_load_fmax : SIAtomicM0Glue2 <"LOAD_FMAX", 1, SDTAtomic2_f32>;
 

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=351851&r1=351850&r2=351851&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Jan 22 10:36:06 2019
@@ -25234,6 +25234,8 @@ X86TargetLowering::shouldExpandAtomicRMW
   case AtomicRMWInst::Min:
   case AtomicRMWInst::UMax:
   case AtomicRMWInst::UMin:
+  case AtomicRMWInst::FAdd:
+  case AtomicRMWInst::FSub:
     // These always require a non-trivial set of data operations on x86. We must
     // use a cmpxchg loop.
     return AtomicExpansionKind::CmpXChg;

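The X86 comment above is the whole story for targets without native FP atomics: AtomicExpand rewrites the atomicrmw into a compare-exchange loop, which is what the new tests below check for. A rough C++ analogue of that loop for f32, included only to make the emitted control flow concrete (an illustration, not the pass's actual output):

// Conceptual equivalent of the cmpxchg loop that AtomicExpand emits for
// 'atomicrmw fadd float* %p, float %v seq_cst' on such targets.
#include <atomic>
#include <cstdint>
#include <cstring>

float atomic_fadd(std::atomic<std::uint32_t> *Ptr, float Value) {
  std::uint32_t OldBits = Ptr->load(std::memory_order_seq_cst);
  for (;;) {
    float Old;
    std::memcpy(&Old, &OldBits, sizeof Old);       // bitcast i32 -> float
    float New = Old + Value;                       // the fadd
    std::uint32_t NewBits;
    std::memcpy(&NewBits, &New, sizeof NewBits);   // bitcast float -> i32
    // cmpxchg: on failure, OldBits is reloaded with the current contents.
    if (Ptr->compare_exchange_weak(OldBits, NewBits,
                                   std::memory_order_seq_cst,
                                   std::memory_order_seq_cst))
      return Old;
  }
}
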
Added: llvm/trunk/test/CodeGen/AMDGPU/local-atomics-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/local-atomics-fp.ll?rev=351851&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/local-atomics-fp.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/local-atomics-fp.ll Tue Jan 22 10:36:06 2019
@@ -0,0 +1,109 @@
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX678,HAS-ATOMICS %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,HAS-ATOMICS %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX678,NO-ATOMICS %s
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX678,NO-ATOMICS %s
+
+; GCN-LABEL: {{^}}lds_atomic_fadd_ret_f32:
+; GFX678-DAG: s_mov_b32 m0
+; GFX9-NOT: m0
+; HAS-ATOMICS-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 4.0
+; HAS-ATOMICS: ds_add_rtn_f32 v0, v0, [[K]]
+
+; NO-ATOMICS: ds_read_b32
+; NO-ATOMICS: v_add_f32
+; NO-ATOMICS: ds_cmpst_rtn_b32
+; NO-ATOMICS: s_cbranch_execnz
+define float @lds_atomic_fadd_ret_f32(float addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw fadd float addrspace(3)* %ptr, float 4.0 seq_cst
+  ret float %result
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fadd_noret_f32:
+; GFX678-DAG: s_mov_b32 m0
+; GFX9-NOT: m0
+; HAS-ATOMICS-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 4.0
+; HAS-ATOMICS: ds_add_f32 v0, [[K]]
+define void @lds_atomic_fadd_noret_f32(float addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw fadd float addrspace(3)* %ptr, float 4.0 seq_cst
+  ret void
+}
+
+; GCN-LABEL: {{^}}lds_ds_fadd:
+; VI-DAG: s_mov_b32 m0
+; GFX9-NOT: m0
+; HAS-ATOMICS-DAG: v_mov_b32_e32 [[V0:v[0-9]+]], 0x42280000
+; HAS-ATOMICS: ds_add_rtn_f32 [[V2:v[0-9]+]], [[V1:v[0-9]+]], [[V0]] offset:32
+; HAS-ATOMICS: ds_add_f32 [[V3:v[0-9]+]], [[V0]] offset:64
+; HAS-ATOMICS: s_waitcnt lgkmcnt(1)
+; HAS-ATOMICS: ds_add_rtn_f32 {{v[0-9]+}}, {{v[0-9]+}}, [[V2]]
+define amdgpu_kernel void @lds_ds_fadd(float addrspace(1)* %out, float addrspace(3)* %ptrf, i32 %idx) {
+  %idx.add = add nuw i32 %idx, 4
+  %shl0 = shl i32 %idx.add, 3
+  %shl1 = shl i32 %idx.add, 4
+  %ptr0 = inttoptr i32 %shl0 to float addrspace(3)*
+  %ptr1 = inttoptr i32 %shl1 to float addrspace(3)*
+  %a1 = atomicrmw fadd float addrspace(3)* %ptr0, float 4.2e+1 seq_cst
+  %a2 = atomicrmw fadd float addrspace(3)* %ptr1, float 4.2e+1 seq_cst
+  %a3 = atomicrmw fadd float addrspace(3)* %ptrf, float %a1 seq_cst
+  store float %a3, float addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fadd_ret_f64:
+; GCN: ds_read_b64
+; GCN: v_add_f64
+; GCN: ds_cmpst_rtn_b64
+; GCN: s_cbranch_execnz
+define double @lds_atomic_fadd_ret_f64(double addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst
+  ret double %result
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fadd_noret_f64:
+; GCN: ds_read_b64
+; GCN: v_add_f64
+; GCN: ds_cmpst_rtn_b64
+; GCN: s_cbranch_execnz
+define void @lds_atomic_fadd_noret_f64(double addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst
+  ret void
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fsub_ret_f32:
+; GCN: ds_read_b32
+; GCN: v_sub_f32
+; GCN: ds_cmpst_rtn_b32
+; GCN: s_cbranch_execnz
+define float @lds_atomic_fsub_ret_f32(float addrspace(3)* %ptr, float %val) nounwind {
+  %result = atomicrmw fsub float addrspace(3)* %ptr, float %val seq_cst
+  ret float %result
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fsub_noret_f32:
+; GCN: ds_read_b32
+; GCN: v_sub_f32
+; GCN: ds_cmpst_rtn_b32
+define void @lds_atomic_fsub_noret_f32(float addrspace(3)* %ptr, float %val) nounwind {
+  %result = atomicrmw fsub float addrspace(3)* %ptr, float %val seq_cst
+  ret void
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fsub_ret_f64:
+; GCN: ds_read_b64
+; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
+; GCN: ds_cmpst_rtn_b64
+
+define double @lds_atomic_fsub_ret_f64(double addrspace(3)* %ptr, double %val) nounwind {
+  %result = atomicrmw fsub double addrspace(3)* %ptr, double %val seq_cst
+  ret double %result
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fsub_noret_f64:
+; GCN: ds_read_b64
+; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
+; GCN: ds_cmpst_rtn_b64
+; GCN: s_cbranch_execnz
+define void @lds_atomic_fsub_noret_f64(double addrspace(3)* %ptr, double %val) nounwind {
+  %result = atomicrmw fsub double addrspace(3)* %ptr, double %val seq_cst
+  ret void
+}

Added: llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll?rev=351851&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll (added)
+++ llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll Tue Jan 22 10:36:06 2019
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=CI %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GFX9 %s
+
+define float @test_atomicrmw_fadd_f32_flat(float* %ptr, float %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f32_flat(
+; CI-NEXT:    [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; CI-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CI:       atomicrmw.start:
+; CI-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT:    [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT:    [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; CI-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CI-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CI-NEXT:    [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CI-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CI-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CI-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CI-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI:       atomicrmw.end:
+; CI-NEXT:    ret float [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f32_flat(
+; GFX9-NEXT:    [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; GFX9-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GFX9:       atomicrmw.start:
+; GFX9-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT:    [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT:    [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; GFX9-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GFX9-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GFX9-NEXT:    [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GFX9-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GFX9-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GFX9-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9:       atomicrmw.end:
+; GFX9-NEXT:    ret float [[TMP6]]
+;
+  %res = atomicrmw fadd float* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define float @test_atomicrmw_fadd_f32_global(float addrspace(1)* %ptr, float %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f32_global(
+; CI-NEXT:    [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; CI-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CI:       atomicrmw.start:
+; CI-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT:    [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT:    [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; CI-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CI-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CI-NEXT:    [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CI-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CI-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CI-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CI-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI:       atomicrmw.end:
+; CI-NEXT:    ret float [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f32_global(
+; GFX9-NEXT:    [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; GFX9-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GFX9:       atomicrmw.start:
+; GFX9-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT:    [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT:    [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; GFX9-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GFX9-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GFX9-NEXT:    [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GFX9-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GFX9-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GFX9-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9:       atomicrmw.end:
+; GFX9-NEXT:    ret float [[TMP6]]
+;
+  %res = atomicrmw fadd float addrspace(1)* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define float @test_atomicrmw_fadd_f32_local(float addrspace(3)* %ptr, float %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f32_local(
+; CI-NEXT:    [[TMP1:%.*]] = load float, float addrspace(3)* [[PTR:%.*]], align 4
+; CI-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CI:       atomicrmw.start:
+; CI-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT:    [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT:    [[TMP2:%.*]] = bitcast float addrspace(3)* [[PTR]] to i32 addrspace(3)*
+; CI-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CI-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CI-NEXT:    [[TMP5:%.*]] = cmpxchg i32 addrspace(3)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CI-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CI-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CI-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CI-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI:       atomicrmw.end:
+; CI-NEXT:    ret float [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f32_local(
+; GFX9-NEXT:    [[RES:%.*]] = atomicrmw fadd float addrspace(3)* [[PTR:%.*]], float [[VALUE:%.*]] seq_cst
+; GFX9-NEXT:    ret float [[RES]]
+;
+  %res = atomicrmw fadd float addrspace(3)* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define half @test_atomicrmw_fadd_f16_flat(half* %ptr, half %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f16_flat(
+; CI-NEXT:    [[RES:%.*]] = atomicrmw fadd half* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CI-NEXT:    ret half [[RES]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f16_flat(
+; GFX9-NEXT:    [[RES:%.*]] = atomicrmw fadd half* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; GFX9-NEXT:    ret half [[RES]]
+;
+  %res = atomicrmw fadd half* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define half @test_atomicrmw_fadd_f16_global(half addrspace(1)* %ptr, half %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f16_global(
+; CI-NEXT:    [[RES:%.*]] = atomicrmw fadd half addrspace(1)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CI-NEXT:    ret half [[RES]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f16_global(
+; GFX9-NEXT:    [[RES:%.*]] = atomicrmw fadd half addrspace(1)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; GFX9-NEXT:    ret half [[RES]]
+;
+  %res = atomicrmw fadd half addrspace(1)* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define half @test_atomicrmw_fadd_f16_local(half addrspace(3)* %ptr, half %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f16_local(
+; CI-NEXT:    [[RES:%.*]] = atomicrmw fadd half addrspace(3)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CI-NEXT:    ret half [[RES]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f16_local(
+; GFX9-NEXT:    [[RES:%.*]] = atomicrmw fadd half addrspace(3)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; GFX9-NEXT:    ret half [[RES]]
+;
+  %res = atomicrmw fadd half addrspace(3)* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define double @test_atomicrmw_fadd_f64_flat(double* %ptr, double %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f64_flat(
+; CI-NEXT:    [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; CI-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CI:       atomicrmw.start:
+; CI-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT:    [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT:    [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; CI-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CI-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CI-NEXT:    [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CI-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CI-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CI-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CI-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI:       atomicrmw.end:
+; CI-NEXT:    ret double [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f64_flat(
+; GFX9-NEXT:    [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; GFX9-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GFX9:       atomicrmw.start:
+; GFX9-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT:    [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT:    [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; GFX9-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GFX9-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GFX9-NEXT:    [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GFX9-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GFX9-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GFX9-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9:       atomicrmw.end:
+; GFX9-NEXT:    ret double [[TMP6]]
+;
+  %res = atomicrmw fadd double* %ptr, double %value seq_cst
+  ret double %res
+}
+
+define double @test_atomicrmw_fadd_f64_global(double addrspace(1)* %ptr, double %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f64_global(
+; CI-NEXT:    [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8
+; CI-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CI:       atomicrmw.start:
+; CI-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT:    [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT:    [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)*
+; CI-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CI-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CI-NEXT:    [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CI-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CI-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CI-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CI-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI:       atomicrmw.end:
+; CI-NEXT:    ret double [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f64_global(
+; GFX9-NEXT:    [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8
+; GFX9-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GFX9:       atomicrmw.start:
+; GFX9-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT:    [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT:    [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)*
+; GFX9-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GFX9-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GFX9-NEXT:    [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GFX9-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GFX9-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GFX9-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9:       atomicrmw.end:
+; GFX9-NEXT:    ret double [[TMP6]]
+;
+  %res = atomicrmw fadd double addrspace(1)* %ptr, double %value seq_cst
+  ret double %res
+}
+
+define double @test_atomicrmw_fadd_f64_local(double addrspace(3)* %ptr, double %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f64_local(
+; CI-NEXT:    [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8
+; CI-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CI:       atomicrmw.start:
+; CI-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT:    [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT:    [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)*
+; CI-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CI-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CI-NEXT:    [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CI-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CI-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CI-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CI-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI:       atomicrmw.end:
+; CI-NEXT:    ret double [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f64_local(
+; GFX9-NEXT:    [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8
+; GFX9-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GFX9:       atomicrmw.start:
+; GFX9-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT:    [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT:    [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)*
+; GFX9-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GFX9-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GFX9-NEXT:    [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GFX9-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GFX9-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GFX9-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9:       atomicrmw.end:
+; GFX9-NEXT:    ret double [[TMP6]]
+;
+  %res = atomicrmw fadd double addrspace(3)* %ptr, double %value seq_cst
+  ret double %res
+}
+

Added: llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll?rev=351851&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll (added)
+++ llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll Tue Jan 22 10:36:06 2019
@@ -0,0 +1,201 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+
+define float @test_atomicrmw_fadd_f32_flat(float* %ptr, float %value) {
+; GCN-LABEL: @test_atomicrmw_fadd_f32_flat(
+; GCN-NEXT:    [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; GCN-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GCN:       atomicrmw.start:
+; GCN-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT:    [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT:    [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GCN-NEXT:    [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GCN-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN:       atomicrmw.end:
+; GCN-NEXT:    ret float [[TMP6]]
+;
+  %res = atomicrmw fsub float* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define float @test_atomicrmw_fsub_f32_global(float addrspace(1)* %ptr, float %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f32_global(
+; GCN-NEXT:    [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; GCN-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GCN:       atomicrmw.start:
+; GCN-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT:    [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT:    [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GCN-NEXT:    [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GCN-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN:       atomicrmw.end:
+; GCN-NEXT:    ret float [[TMP6]]
+;
+  %res = atomicrmw fsub float addrspace(1)* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define float @test_atomicrmw_fsub_f32_local(float addrspace(3)* %ptr, float %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f32_local(
+; GCN-NEXT:    [[TMP1:%.*]] = load float, float addrspace(3)* [[PTR:%.*]], align 4
+; GCN-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GCN:       atomicrmw.start:
+; GCN-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT:    [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT:    [[TMP2:%.*]] = bitcast float addrspace(3)* [[PTR]] to i32 addrspace(3)*
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GCN-NEXT:    [[TMP5:%.*]] = cmpxchg i32 addrspace(3)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GCN-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN:       atomicrmw.end:
+; GCN-NEXT:    ret float [[TMP6]]
+;
+  %res = atomicrmw fsub float addrspace(3)* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define half @test_atomicrmw_fsub_f16_flat(half* %ptr, half %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f16_flat(
+; GCN-NEXT:    [[TMP1:%.*]] = load half, half* [[PTR:%.*]], align 2
+; GCN-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GCN:       atomicrmw.start:
+; GCN-NEXT:    [[LOADED:%.*]] = phi half [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT:    [[NEW:%.*]] = fsub half [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT:    [[TMP2:%.*]] = bitcast half* [[PTR]] to i16*
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast half [[NEW]] to i16
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast half [[LOADED]] to i16
+; GCN-NEXT:    [[TMP5:%.*]] = cmpxchg i16* [[TMP2]], i16 [[TMP4]], i16 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT:    [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP5]], 1
+; GCN-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i16, i1 } [[TMP5]], 0
+; GCN-NEXT:    [[TMP6]] = bitcast i16 [[NEWLOADED]] to half
+; GCN-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN:       atomicrmw.end:
+; GCN-NEXT:    ret half [[TMP6]]
+;
+  %res = atomicrmw fsub half* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define half @test_atomicrmw_fsub_f16_global(half addrspace(1)* %ptr, half %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f16_global(
+; GCN-NEXT:    [[TMP1:%.*]] = load half, half addrspace(1)* [[PTR:%.*]], align 2
+; GCN-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GCN:       atomicrmw.start:
+; GCN-NEXT:    [[LOADED:%.*]] = phi half [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT:    [[NEW:%.*]] = fsub half [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT:    [[TMP2:%.*]] = bitcast half addrspace(1)* [[PTR]] to i16 addrspace(1)*
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast half [[NEW]] to i16
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast half [[LOADED]] to i16
+; GCN-NEXT:    [[TMP5:%.*]] = cmpxchg i16 addrspace(1)* [[TMP2]], i16 [[TMP4]], i16 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT:    [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP5]], 1
+; GCN-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i16, i1 } [[TMP5]], 0
+; GCN-NEXT:    [[TMP6]] = bitcast i16 [[NEWLOADED]] to half
+; GCN-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN:       atomicrmw.end:
+; GCN-NEXT:    ret half [[TMP6]]
+;
+  %res = atomicrmw fsub half addrspace(1)* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define half @test_atomicrmw_fsub_f16_local(half addrspace(3)* %ptr, half %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f16_local(
+; GCN-NEXT:    [[TMP1:%.*]] = load half, half addrspace(3)* [[PTR:%.*]], align 2
+; GCN-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GCN:       atomicrmw.start:
+; GCN-NEXT:    [[LOADED:%.*]] = phi half [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT:    [[NEW:%.*]] = fsub half [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT:    [[TMP2:%.*]] = bitcast half addrspace(3)* [[PTR]] to i16 addrspace(3)*
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast half [[NEW]] to i16
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast half [[LOADED]] to i16
+; GCN-NEXT:    [[TMP5:%.*]] = cmpxchg i16 addrspace(3)* [[TMP2]], i16 [[TMP4]], i16 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT:    [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP5]], 1
+; GCN-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i16, i1 } [[TMP5]], 0
+; GCN-NEXT:    [[TMP6]] = bitcast i16 [[NEWLOADED]] to half
+; GCN-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN:       atomicrmw.end:
+; GCN-NEXT:    ret half [[TMP6]]
+;
+  %res = atomicrmw fsub half addrspace(3)* %ptr, half %value seq_cst
+  ret half %res
+}
+
+define double @test_atomicrmw_fsub_f64_flat(double* %ptr, double %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f64_flat(
+; GCN-NEXT:    [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; GCN-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GCN:       atomicrmw.start:
+; GCN-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT:    [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT:    [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GCN-NEXT:    [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GCN-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GCN-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GCN-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN:       atomicrmw.end:
+; GCN-NEXT:    ret double [[TMP6]]
+;
+  %res = atomicrmw fsub double* %ptr, double %value seq_cst
+  ret double %res
+}
+
+define double @test_atomicrmw_fsub_f64_global(double addrspace(1)* %ptr, double %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f64_global(
+; GCN-NEXT:    [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8
+; GCN-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GCN:       atomicrmw.start:
+; GCN-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT:    [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT:    [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)*
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GCN-NEXT:    [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GCN-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GCN-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GCN-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN:       atomicrmw.end:
+; GCN-NEXT:    ret double [[TMP6]]
+;
+  %res = atomicrmw fsub double addrspace(1)* %ptr, double %value seq_cst
+  ret double %res
+}
+
+define double @test_atomicrmw_fsub_f64_local(double addrspace(3)* %ptr, double %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f64_local(
+; GCN-NEXT:    [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8
+; GCN-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; GCN:       atomicrmw.start:
+; GCN-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT:    [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT:    [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)*
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GCN-NEXT:    [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GCN-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GCN-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GCN-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN:       atomicrmw.end:
+; GCN-NEXT:    ret double [[TMP6]]
+;
+  %res = atomicrmw fsub double addrspace(3)* %ptr, double %value seq_cst
+  ret double %res
+}

Added: llvm/trunk/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll?rev=351851&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll (added)
+++ llvm/trunk/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll Tue Jan 22 10:36:06 2019
@@ -0,0 +1,112 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+
+define float @test_atomicrmw_fadd_f32(float* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f32(
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    ret float [[TMP6]]
+;
+  %res = atomicrmw fadd float* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define double @test_atomicrmw_fadd_f64(double* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f64(
+; CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    ret double [[TMP6]]
+;
+  %res = atomicrmw fadd double* %ptr, double %value seq_cst
+  ret double %res
+}
+
+define float @test_atomicrmw_fadd_f32_as1(float addrspace(1)* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f32_as1(
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    ret float [[TMP6]]
+;
+  %res = atomicrmw fadd float addrspace(1)* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define float @test_atomicrmw_fsub_f32(float* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f32(
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    ret float [[TMP6]]
+;
+  %res = atomicrmw fsub float* %ptr, float %value seq_cst
+  ret float %res
+}
+
+define double @test_atomicrmw_fsub_f64(double* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f64(
+; CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT:    [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    ret double [[TMP6]]
+;
+  %res = atomicrmw fsub double* %ptr, double %value seq_cst
+  ret double %res
+}
