[llvm-commits] [llvm] r137664 - in /llvm/trunk: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp lib/Transforms/InstCombine/InstCombinePHI.cpp lib/Transforms/InstCombine/InstructionCombining.cpp test/Transforms/InstCombine/atomic.ll

Eli Friedman eli.friedman at gmail.com
Mon Aug 15 15:09:40 PDT 2011


Author: efriedma
Date: Mon Aug 15 17:09:40 2011
New Revision: 137664

URL: http://llvm.org/viewvc/llvm-project?rev=137664&view=rev
Log:
Update instcombine for atomic load/store.
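
For context, the thread running through this patch is that InstCombine's memory
transforms now gate on isSimple() -- true only for accesses that are neither
volatile nor atomic -- rather than on isVolatile() alone. A minimal sketch of
that predicate, assuming the LLVM C++ API of this era (newer trees spell the
header "llvm/IR/Instructions.h"); the helper name is illustrative, not part of
the patch:

  #include "llvm/Instructions.h"
  using namespace llvm;

  // isSimple() folds together !isVolatile() and !isAtomic(); only "simple"
  // loads remain eligible for the transforms guarded in this patch.
  static bool canApplySimpleLoadTransforms(const LoadInst &LI) {
    return LI.isSimple();
  }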


Added:
    llvm/trunk/test/Transforms/InstCombine/atomic.ll
Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp?rev=137664&r1=137663&r2=137664&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp Mon Aug 15 17:09:40 2011
@@ -132,6 +132,7 @@
         LoadInst *NewLoad = 
           IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
         NewLoad->setAlignment(LI.getAlignment());
+        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
         // Now cast the result of the load.
         return new BitCastInst(NewLoad, LI.getType());
       }
@@ -162,8 +163,9 @@
     if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
       return Res;
 
-  // None of the following transforms are legal for volatile loads.
-  if (LI.isVolatile()) return 0;
+  // None of the following transforms are legal for volatile/atomic loads.
+  // FIXME: Some of it is okay for atomic loads; needs refactoring.
+  if (!LI.isSimple()) return 0;
   
   // Do really simple store-to-load forwarding and load CSE, to catch cases
   // where there are several consecutive memory accesses to the same location,
@@ -368,21 +370,6 @@
   Value *Val = SI.getOperand(0);
   Value *Ptr = SI.getOperand(1);
 
-  // If the RHS is an alloca with a single use, zapify the store, making the
-  // alloca dead.
-  if (!SI.isVolatile()) {
-    if (Ptr->hasOneUse()) {
-      if (isa<AllocaInst>(Ptr)) 
-        return EraseInstFromFunction(SI);
-      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
-        if (isa<AllocaInst>(GEP->getOperand(0))) {
-          if (GEP->getOperand(0)->hasOneUse())
-            return EraseInstFromFunction(SI);
-        }
-      }
-    }
-  }
-
   // Attempt to improve the alignment.
   if (TD) {
     unsigned KnownAlign =
@@ -398,6 +385,23 @@
       SI.setAlignment(EffectiveStoreAlign);
   }
 
+  // Don't hack volatile/atomic stores.
+  // FIXME: Some bits are legal for atomic stores; needs refactoring.
+  if (!SI.isSimple()) return 0;
+
+  // If the RHS is an alloca with a single use, zapify the store, making the
+  // alloca dead.
+  if (Ptr->hasOneUse()) {
+    if (isa<AllocaInst>(Ptr)) 
+      return EraseInstFromFunction(SI);
+    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
+      if (isa<AllocaInst>(GEP->getOperand(0))) {
+        if (GEP->getOperand(0)->hasOneUse())
+          return EraseInstFromFunction(SI);
+      }
+    }
+  }
+
   // Do really simple DSE, to catch cases where there are several consecutive
   // stores to the same location, separated by a few arithmetic operations. This
   // situation often occurs with bitfield accesses.
@@ -415,8 +419,8 @@
     
     if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
       // Prev store isn't volatile, and stores to the same location?
-      if (!PrevSI->isVolatile() &&equivalentAddressValues(PrevSI->getOperand(1),
-                                                          SI.getOperand(1))) {
+      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
+                                                        SI.getOperand(1))) {
         ++NumDeadStore;
         ++BBI;
         EraseInstFromFunction(*PrevSI);
@@ -430,7 +434,7 @@
     // then *this* store is dead (X = load P; store X -> P).
     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
       if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
-          !SI.isVolatile())
+          LI->isSimple())
         return EraseInstFromFunction(SI);
       
       // Otherwise, this is a load from some other location.  Stores before it
@@ -442,9 +446,6 @@
     if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
       break;
   }
-  
-  
-  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.
 
   // store X, null    -> turns into 'unreachable' in SimplifyCFG
   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
@@ -547,11 +548,11 @@
         return false;
       --BBI;
     }
-    // If this isn't a store, isn't a store to the same location, or if the
-    // alignments differ, bail out.
+    // If this isn't a store, isn't a store to the same location, or is not the
+    // right kind of store, bail out.
     OtherStore = dyn_cast<StoreInst>(BBI);
     if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
-        OtherStore->getAlignment() != SI.getAlignment())
+        !SI.isSameOperationAs(OtherStore))
       return false;
   } else {
     // Otherwise, the other block ended with a conditional branch. If one of the
@@ -567,7 +568,7 @@
       // Check to see if we find the matching store.
       if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
         if (OtherStore->getOperand(1) != SI.getOperand(1) ||
-            OtherStore->getAlignment() != SI.getAlignment())
+            !SI.isSameOperationAs(OtherStore))
           return false;
         break;
       }
@@ -601,8 +602,10 @@
   // insert it.
   BBI = DestBB->getFirstNonPHI();
   StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
-                                   OtherStore->isVolatile(),
-                                   SI.getAlignment());
+                                   SI.isVolatile(),
+                                   SI.getAlignment(),
+                                   SI.getOrdering(),
+                                   SI.getSynchScope());
   InsertNewInstBefore(NewSI, *BBI);
   NewSI->setDebugLoc(OtherStore->getDebugLoc()); 
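
Two details in this file are worth calling out. First, the cross-block store
merge above now insists that the two stores be the same kind of operation,
not merely that their alignments match. A hedged sketch of that check, again
assuming the LLVM C++ API of this era, with an illustrative helper name that
is not part of the patch:

  #include "llvm/Instructions.h"
  using namespace llvm;

  // Candidate stores are only merged when they write through the same pointer
  // operand and agree on their attributes; isSameOperationAs requires the two
  // instructions to match in attributes as well as opcode and operand types.
  static bool storesAreMergeable(const StoreInst &SI, const StoreInst &Other) {
    return Other.getOperand(1) == SI.getOperand(1) &&
           SI.isSameOperationAs(&Other);
  }

Second, the rebuilt store now copies volatility, alignment, ordering and
synchronization scope from SI rather than mixing attributes from the two
stores, so none of the original store's semantics are dropped in the merge.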
 

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp?rev=137664&r1=137663&r2=137664&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp Mon Aug 15 17:09:40 2011
@@ -286,7 +286,12 @@
 
 Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
   LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
-  
+
+  // FIXME: This is overconservative; this transform is allowed in some cases
+  // for atomic operations.
+  if (FirstLI->isAtomic())
+    return 0;
+
   // When processing loads, we need to propagate two bits of information to the
   // sunk load: whether it is volatile, and what its alignment is.  We currently
   // don't sink loads when some have their alignment specified and some don't.
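
The bail-out added above is deliberately conservative: the fold only knows how
to propagate volatility and alignment to the sunk load, so an atomic incoming
load's ordering and synchronization scope would be lost, and such loads are
rejected outright for now. A hedged sketch of the gate, with an illustrative
helper name that is not part of the patch:

  #include "llvm/Instructions.h"
  using namespace llvm;

  // The PHI-of-loads fold gives up as soon as the first incoming load is
  // atomic; the FIXME notes that some atomic cases could be handled later.
  static bool phiLoadFoldMayProceed(const LoadInst &FirstLI) {
    return !FirstLI.isAtomic();
  }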

Modified: llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp?rev=137664&r1=137663&r2=137664&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp Mon Aug 15 17:09:40 2011
@@ -1379,7 +1379,7 @@
     // load from a GEP. This reduces the size of the load.
     // FIXME: If a load is used only by extractvalue instructions then this
     //        could be done regardless of having multiple uses.
-    if (!L->isVolatile() && L->hasOneUse()) {
+    if (L->isSimple() && L->hasOneUse()) {
       // extractvalue has integer indices, getelementptr has Value*s. Convert.
       SmallVector<Value*, 4> Indices;
       // Prefix an i32 0 since we need the first element.

Added: llvm/trunk/test/Transforms/InstCombine/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/atomic.ll?rev=137664&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/atomic.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/atomic.ll Mon Aug 15 17:09:40 2011
@@ -0,0 +1,15 @@
+; RUN: opt -S < %s -instcombine | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-macosx10.7.0"
+
+; Check transforms involving atomic operations
+
+define i32* @test1(i8** %p) {
+; CHECK: define i32* @test1
+; CHECK: load atomic i8** %p monotonic, align 8
+  %c = bitcast i8** %p to i32**
+  %r = load atomic i32** %c monotonic, align 8
+  ret i32* %r
+}
+




