[llvm] r201143 - XCore target: Lower ATOMIC_LOAD & ATOMIC_STORE

Robert Lytton robert at xmos.com
Tue Feb 11 02:36:18 PST 2014


Author: rlytton
Date: Tue Feb 11 04:36:18 2014
New Revision: 201143

URL: http://llvm.org/viewvc/llvm-project?rev=201143&view=rev
Log:
 XCore target: Lower ATOMIC_LOAD & ATOMIC_STORE

Modified:
    llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp
    llvm/trunk/lib/Target/XCore/XCoreISelLowering.h
    llvm/trunk/test/CodeGen/XCore/atomic.ll
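
For context, ATOMIC_LOAD and ATOMIC_STORE nodes are what ordinary C++11
atomic accesses become in the SelectionDAG. A minimal sketch (illustrative
source, not part of this commit) of code that produces the i32 acquire and
release atomics exercised by the test below:

  #include <atomic>

  std::atomic<int> counter;

  int read_counter() {
    // Becomes "load atomic i32 ... acquire, align 4" in IR, which this
    // commit lowers to a plain ldw followed by a #MEMBARRIER.
    return counter.load(std::memory_order_acquire);
  }

  void write_counter(int v) {
    // Becomes "store atomic i32 ... release, align 4": a #MEMBARRIER
    // followed by a plain stw.
    counter.store(v, std::memory_order_release);
  }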

Modified: llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp?rev=201143&r1=201142&r2=201143&view=diff
==============================================================================
--- llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp Tue Feb 11 04:36:18 2014
@@ -159,7 +159,12 @@ XCoreTargetLowering::XCoreTargetLowering
   setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
 
   // Atomic operations
+  // We request fences for ATOMIC_* instructions, reducing them to Monotonic.
+  // As we are always Sequentially Consistent, an ATOMIC_FENCE becomes a no-op.
+  setInsertFencesForAtomic(true);
   setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
 
   // TRAMPOLINE is custom lowered.
   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
@@ -223,6 +228,8 @@ LowerOperation(SDValue Op, SelectionDAG
   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
+  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
+  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
   default:
     llvm_unreachable("unimplemented operand");
   }
@@ -964,6 +971,67 @@ LowerATOMIC_FENCE(SDValue Op, SelectionD
   return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
 }
 
+SDValue XCoreTargetLowering::
+LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
+  AtomicSDNode *N = cast<AtomicSDNode>(Op);
+  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
+  assert(N->getOrdering() <= Monotonic &&
+         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  if (N->getMemoryVT() == MVT::i32) {
+    if (N->getAlignment() < 4)
+      report_fatal_error("atomic load must be aligned");
+    return DAG.getLoad(getPointerTy(), SDLoc(Op), N->getChain(),
+                       N->getBasePtr(), N->getPointerInfo(),
+                       N->isVolatile(), N->isNonTemporal(),
+                       N->isInvariant(), N->getAlignment(),
+                       N->getTBAAInfo(), N->getRanges());
+  }
+  if (N->getMemoryVT() == MVT::i16) {
+    if (N->getAlignment() < 2)
+      report_fatal_error("atomic load must be aligned");
+    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+                          N->isVolatile(), N->isNonTemporal(),
+                          N->getAlignment(), N->getTBAAInfo());
+  }
+  if (N->getMemoryVT() == MVT::i8)
+    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+                          N->isVolatile(), N->isNonTemporal(),
+                          N->getAlignment(), N->getTBAAInfo());
+  return SDValue();
+}
+
+SDValue XCoreTargetLowering::
+LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
+  AtomicSDNode *N = cast<AtomicSDNode>(Op);
+  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
+  assert(N->getOrdering() <= Monotonic &&
+         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  if (N->getMemoryVT() == MVT::i32) {
+    if (N->getAlignment() < 4)
+      report_fatal_error("atomic store must be aligned");
+    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
+                        N->getBasePtr(), N->getPointerInfo(),
+                        N->isVolatile(), N->isNonTemporal(),
+                        N->getAlignment(), N->getTBAAInfo());
+  }
+  if (N->getMemoryVT() == MVT::i16) {
+    if (N->getAlignment() < 2)
+      report_fatal_error("atomic store must be aligned");
+    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+                             N->isVolatile(), N->isNonTemporal(),
+                             N->getAlignment(), N->getTBAAInfo());
+  }
+  if (N->getMemoryVT() == MVT::i8)
+    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+                             N->isVolatile(), N->isNonTemporal(),
+                             N->getAlignment(), N->getTBAAInfo());
+  return SDValue();
+}
+
 //===----------------------------------------------------------------------===//
 //                      Calling Convention Implementation
 //===----------------------------------------------------------------------===//
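
The interplay with setInsertFencesForAtomic(true) is what makes the
"<= Monotonic" assertions above safe: the DAG builder brackets any
stronger-ordered atomic with explicit ATOMIC_FENCE nodes and hands the
target only the Monotonic access, so LowerATOMIC_LOAD and
LowerATOMIC_STORE never see acquire, release or seq_cst directly. The
underlying equivalence, written as standalone C++ (illustrative only,
not XCore or LLVM code):

  #include <atomic>

  std::atomic<int> x;

  // An acquire load is a Monotonic (relaxed) load followed by a fence;
  // on XCore the fence becomes the #MEMBARRIER pseudo instruction.
  int acquire_load() {
    int v = x.load(std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_acquire);
    return v;
  }

  // A release store is a fence followed by a Monotonic (relaxed) store.
  void release_store(int v) {
    std::atomic_thread_fence(std::memory_order_release);
    x.store(v, std::memory_order_relaxed);
  }

The natural-alignment checks exist because a single ldw or stw is only
atomic at natural alignment; a misaligned access would be split into
multiple operations, so the lowering calls report_fatal_error rather
than silently emitting a non-atomic sequence.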

Modified: llvm/trunk/lib/Target/XCore/XCoreISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/XCore/XCoreISelLowering.h?rev=201143&r1=201142&r2=201143&view=diff
==============================================================================
--- llvm/trunk/lib/Target/XCore/XCoreISelLowering.h (original)
+++ llvm/trunk/lib/Target/XCore/XCoreISelLowering.h Tue Feb 11 04:36:18 2014
@@ -172,6 +172,8 @@ namespace llvm {
     SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
 
     // Inline asm support
     std::pair<unsigned, const TargetRegisterClass*>

Modified: llvm/trunk/test/CodeGen/XCore/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/XCore/atomic.ll?rev=201143&r1=201142&r2=201143&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/XCore/atomic.ll (original)
+++ llvm/trunk/test/CodeGen/XCore/atomic.ll Tue Feb 11 04:36:18 2014
@@ -14,3 +14,79 @@ entry:
   fence seq_cst
   ret void
 }
+
+@pool = external global i64
+
+define void @atomicloadstore() nounwind {
+entry:
+; CHECK-LABEL: atomicloadstore
+
+; CHECK: ldw r0, dp[pool]
+; CHECK-NEXT: #MEMBARRIER
+  %0 = load atomic i32* bitcast (i64* @pool to i32*) acquire, align 4
+
+; CHECK-NEXT: ldaw r1, dp[pool]
+; CHECK-NEXT: ldc r2, 0
+
+; CHECK-NEXT: ld16s r3, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  %1 = load atomic i16* bitcast (i64* @pool to i16*) acquire, align 2
+
+; CHECK-NEXT: ld8u r11, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  %2 = load atomic i8* bitcast (i64* @pool to i8*) acquire, align 1
+
+; CHECK-NEXT: ldw r4, dp[pool]
+; CHECK-NEXT: #MEMBARRIER
+  %3 = load atomic i32* bitcast (i64* @pool to i32*) seq_cst, align 4
+
+; CHECK-NEXT: ld16s r5, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  %4 = load atomic i16* bitcast (i64* @pool to i16*) seq_cst, align 2
+
+; CHECK-NEXT: ld8u r6, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  %5 = load atomic i8* bitcast (i64* @pool to i8*) seq_cst, align 1
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: stw r0, dp[pool]
+  store atomic i32 %0, i32* bitcast (i64* @pool to i32*) release, align 4
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st16 r3, r1[r2]
+  store atomic i16 %1, i16* bitcast (i64* @pool to i16*) release, align 2
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st8 r11, r1[r2]
+  store atomic i8 %2, i8* bitcast (i64* @pool to i8*) release, align 1
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: stw r4, dp[pool]
+; CHECK-NEXT: #MEMBARRIER
+  store atomic i32 %3, i32* bitcast (i64* @pool to i32*) seq_cst, align 4
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st16 r5, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  store atomic i16 %4, i16* bitcast (i64* @pool to i16*) seq_cst, align 2
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st8 r6, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  store atomic i8 %5, i8* bitcast (i64* @pool to i8*) seq_cst, align 1
+
+; CHECK-NEXT: ldw r0, dp[pool]
+; CHECK-NEXT: stw r0, dp[pool]
+; CHECK-NEXT: ld16s r0, r1[r2]
+; CHECK-NEXT: st16 r0, r1[r2]
+; CHECK-NEXT: ld8u r0, r1[r2]
+; CHECK-NEXT: st8 r0, r1[r2]
+  %6 = load atomic i32* bitcast (i64* @pool to i32*) monotonic, align 4
+  store atomic i32 %6, i32* bitcast (i64* @pool to i32*) monotonic, align 4
+  %7 = load atomic i16* bitcast (i64* @pool to i16*) monotonic, align 2
+  store atomic i16 %7, i16* bitcast (i64* @pool to i16*) monotonic, align 2
+  %8 = load atomic i8* bitcast (i64* @pool to i8*) monotonic, align 1
+  store atomic i8 %8, i8* bitcast (i64* @pool to i8*) monotonic, align 1
+
+  ret void
+}
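
The seq_cst stores above are bracketed by a #MEMBARRIER on both sides,
while the monotonic accesses at the end compile to bare loads and
stores with no barrier at all. The same shapes as standalone C++
(illustrative, not from this commit):

  #include <atomic>

  std::atomic<int> g;

  // seq_cst store: fence, relaxed store, fence -- matching the
  // #MEMBARRIER / stw / #MEMBARRIER triple in the CHECK lines above.
  void seq_cst_store(int v) {
    std::atomic_thread_fence(std::memory_order_seq_cst);
    g.store(v, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

  // monotonic load/store pair: a bare ldw and stw, no fences.
  void monotonic_copy() {
    g.store(g.load(std::memory_order_relaxed),
            std::memory_order_relaxed);
  }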

More information about the llvm-commits mailing list