[llvm-commits] [llvm] r138505 - in /llvm/trunk: lib/Target/X86/X86ISelLowering.cpp test/CodeGen/X86/atomic-load-store-wide.ll

Eli Friedman eli.friedman at gmail.com
Wed Aug 24 15:33:28 PDT 2011


Author: efriedma
Date: Wed Aug 24 17:33:28 2011
New Revision: 138505

URL: http://llvm.org/viewvc/llvm-project?rev=138505&view=rev
Log:
Hook up 64-bit atomic load/store on x86-32.  I plan to write more efficient implementations eventually.
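For context, this allows IR like the following (a minimal sketch; function and value names are illustrative, and the new test below exercises the same pattern) to make it through instruction selection when targeting i386:

  define void @copy64(i64* %dst, i64* %src) {
    %val = load atomic i64* %src seq_cst, align 8
    store atomic i64 %val, i64* %dst seq_cst, align 8
    ret void
  }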


Added:
    llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll
Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=138505&r1=138504&r2=138505&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Aug 24 17:33:28 2011
@@ -468,6 +468,7 @@
   }
 
   if (!Subtarget->is64Bit()) {
+    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
@@ -10003,15 +10004,20 @@
 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
   SDNode *Node = Op.getNode();
   DebugLoc dl = Node->getDebugLoc();
+  EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
 
   // Convert seq_cst store -> xchg
-  if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent) {
+  // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
+  // FIXME: On 32-bit, store -> fist or movq would be more efficient
+  //        (The only way to get a 16-byte store is cmpxchg16b)
+  // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
+  if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
+      !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
     SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                                  cast<AtomicSDNode>(Node)->getMemoryVT(),
                                  Node->getOperand(0),
                                  Node->getOperand(1), Node->getOperand(2),
-                                 cast<AtomicSDNode>(Node)->getSrcValue(),
-                                 cast<AtomicSDNode>(Node)->getAlignment(),
+                                 cast<AtomicSDNode>(Node)->getMemOperand(),
                                  cast<AtomicSDNode>(Node)->getOrdering(),
                                  cast<AtomicSDNode>(Node)->getSynchScope());
     return Swap.getValue(1);
@@ -10121,6 +10127,28 @@
   }
 }
 
+static void ReplaceATOMIC_LOAD(SDNode *Node,
+                                  SmallVectorImpl<SDValue> &Results,
+                                  SelectionDAG &DAG) {
+  DebugLoc dl = Node->getDebugLoc();
+  EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
+
+  // Convert wide load -> cmpxchg8b/cmpxchg16b
+  // FIXME: On 32-bit, load -> fild or movq would be more efficient
+  //        (The only way to get a 16-byte load is cmpxchg16b)
+  // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment.
+  SDValue Zero = DAG.getConstant(0, cast<AtomicSDNode>(Node)->getMemoryVT());
+  SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
+                               cast<AtomicSDNode>(Node)->getMemoryVT(),
+                               Node->getOperand(0),
+                               Node->getOperand(1), Zero, Zero,
+                               cast<AtomicSDNode>(Node)->getMemOperand(),
+                               cast<AtomicSDNode>(Node)->getOrdering(),
+                               cast<AtomicSDNode>(Node)->getSynchScope());
+  Results.push_back(Swap.getValue(0));
+  Results.push_back(Swap.getValue(1));
+}
+
 void X86TargetLowering::
 ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
                         SelectionDAG &DAG, unsigned NewOp) const {
@@ -10244,6 +10272,8 @@
   case ISD::ATOMIC_SWAP:
     ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG);
     return;
+  case ISD::ATOMIC_LOAD:
+    ReplaceATOMIC_LOAD(N, Results, DAG);
   }
 }
 

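In effect, the new lowering turns a seq_cst or wide (i64-on-x86-32) atomic store into an atomic swap, and a wide atomic load into a compare-and-swap of zero against zero; on 32-bit targets both end up as cmpxchg8b. Expressed as IR rather than SelectionDAG nodes (a rough sketch only, not the actual codegen path; names are illustrative):

  define void @store64(i64* %p, i64 %v) {
    ; store -> swap; on x86-32 the swap itself is expanded to a cmpxchg8b loop
    %old = atomicrmw xchg i64* %p, i64 %v seq_cst
    ret void
  }

  define i64 @load64(i64* %p) {
    ; load -> cmpxchg of 0 against 0; cmpxchg8b always returns the current value
    %val = cmpxchg i64* %p, i64 0, i64 0 seq_cst
    ret i64 %val
  }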
Added: llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll?rev=138505&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll (added)
+++ llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll Wed Aug 24 17:33:28 2011
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=x86 | FileCheck %s
+
+; 64-bit load/store on x86-32
+; FIXME: The generated code can be substantially improved.
+
+define void @test1(i64* %ptr, i64 %val1) {
+; CHECK: test1
+; CHECK: cmpxchg8b
+; CHECK-NEXT: jne
+  store atomic i64 %val1, i64* %ptr seq_cst, align 4
+  ret void
+}
+
+define i64 @test2(i64* %ptr) {
+; CHECK: test2
+; CHECK: cmpxchg8b
+  %val = load atomic i64* %ptr seq_cst, align 4
+  ret i64 %val
+}
