[llvm-commits] [llvm] r137648 - in /llvm/trunk: lib/Analysis/AliasSetTracker.cpp lib/Transforms/Scalar/LICM.cpp test/Transforms/LICM/atomics.ll

Eli Friedman eli.friedman at gmail.com
Mon Aug 15 13:52:09 PDT 2011


Author: efriedma
Date: Mon Aug 15 15:52:09 2011
New Revision: 137648

URL: http://llvm.org/viewvc/llvm-project?rev=137648&view=rev
Log:
Atomic load/store support in LICM.


Added:
    llvm/trunk/test/Transforms/LICM/atomics.ll
Modified:
    llvm/trunk/lib/Analysis/AliasSetTracker.cpp
    llvm/trunk/lib/Transforms/Scalar/LICM.cpp

Modified: llvm/trunk/lib/Analysis/AliasSetTracker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/AliasSetTracker.cpp?rev=137648&r1=137647&r2=137648&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/AliasSetTracker.cpp (original)
+++ llvm/trunk/lib/Analysis/AliasSetTracker.cpp Mon Aug 15 15:52:09 2011
@@ -126,8 +126,6 @@
 void AliasSet::addUnknownInst(Instruction *I, AliasAnalysis &AA) {
   UnknownInsts.push_back(I);
 
-  if (!I->mayReadOrWriteMemory())
-    return;
   if (!I->mayWriteToMemory()) {
     AliasTy = MayAlias;
     AccessTy |= Refs;
@@ -297,22 +295,28 @@
 
 
 bool AliasSetTracker::add(LoadInst *LI) {
+  if (LI->getOrdering() > Monotonic) return addUnknown(LI);
+  AliasSet::AccessType ATy = AliasSet::Refs;
+  if (!LI->isUnordered()) ATy = AliasSet::ModRef;
   bool NewPtr;
   AliasSet &AS = addPointer(LI->getOperand(0),
                             AA.getTypeStoreSize(LI->getType()),
                             LI->getMetadata(LLVMContext::MD_tbaa),
-                            AliasSet::Refs, NewPtr);
+                            ATy, NewPtr);
   if (LI->isVolatile()) AS.setVolatile();
   return NewPtr;
 }
 
 bool AliasSetTracker::add(StoreInst *SI) {
+  if (SI->getOrdering() > Monotonic) return addUnknown(SI);
+  AliasSet::AccessType ATy = AliasSet::Mods;
+  if (!SI->isUnordered()) ATy = AliasSet::ModRef;
   bool NewPtr;
   Value *Val = SI->getOperand(0);
   AliasSet &AS = addPointer(SI->getOperand(1),
                             AA.getTypeStoreSize(Val->getType()),
                             SI->getMetadata(LLVMContext::MD_tbaa),
-                            AliasSet::Mods, NewPtr);
+                            ATy, NewPtr);
   if (SI->isVolatile()) AS.setVolatile();
   return NewPtr;
 }

Modified: llvm/trunk/lib/Transforms/Scalar/LICM.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LICM.cpp?rev=137648&r1=137647&r2=137648&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LICM.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LICM.cpp Mon Aug 15 15:52:09 2011
@@ -362,8 +362,8 @@
 bool LICM::canSinkOrHoistInst(Instruction &I) {
   // Loads have extra constraints we have to verify before we can hoist them.
   if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
-    if (LI->isVolatile())
-      return false;        // Don't hoist volatile loads!
+    if (!LI->isUnordered())
+      return false;        // Don't hoist volatile/atomic loads!
 
     // Loads from constant memory are always safe to move, even if they end up
     // in the same alias set as something that ends up being modified.
@@ -722,15 +722,18 @@
 
      // If there is a non-load/store instruction in the loop, we can't promote
      // it.
-      if (isa<LoadInst>(Use)) {
-        assert(!cast<LoadInst>(Use)->isVolatile() && "AST broken");
+      if (LoadInst *load = dyn_cast<LoadInst>(Use)) {
+        assert(!load->isVolatile() && "AST broken");
+        if (!load->isSimple())
+          return;
       } else if (StoreInst *store = dyn_cast<StoreInst>(Use)) {
         // Stores *of* the pointer are not interesting, only stores *to* the
         // pointer.
         if (Use->getOperand(1) != ASIV)
           continue;
-        unsigned InstAlignment = store->getAlignment();
-        assert(!cast<StoreInst>(Use)->isVolatile() && "AST broken");
+        assert(!store->isVolatile() && "AST broken");
+        if (!store->isSimple())
+          return;
 
         // Note that we only check GuaranteedToExecute inside the store case
         // so that we do not introduce stores where they did not exist before
@@ -740,6 +743,7 @@
         // restrictive (and performant) alignment and if we are sure this
         // instruction will be executed, update the alignment.
         // Larger is better, with the exception of 0 being the best alignment.
+        unsigned InstAlignment = store->getAlignment();
         if ((InstAlignment > Alignment || InstAlignment == 0)
             && (Alignment != 0))
           if (isGuaranteedToExecute(*Use)) {

Added: llvm/trunk/test/Transforms/LICM/atomics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LICM/atomics.ll?rev=137648&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LICM/atomics.ll (added)
+++ llvm/trunk/test/Transforms/LICM/atomics.ll Mon Aug 15 15:52:09 2011
@@ -0,0 +1,79 @@
+; RUN: opt < %s -S -basicaa -licm | FileCheck %s
+
+; Check that we can hoist unordered loads
+define i32 @test1(i32* nocapture %y) nounwind uwtable ssp {
+entry:
+  br label %loop
+
+loop:
+  %i = phi i32 [ %inc, %loop ], [ 0, %entry ]
+  %val = load atomic i32* %y unordered, align 4
+  %inc = add nsw i32 %i, 1
+  %exitcond = icmp eq i32 %inc, %val
+  br i1 %exitcond, label %end, label %loop
+
+end:
+  ret i32 %val
+; CHECK: define i32 @test1(
+; CHECK: load atomic
+; CHECK-NEXT: br label %loop
+}
+
+; Check that we don't sink/hoist monotonic loads
+; (Strictly speaking, it's not forbidden, but it's supposed to be possible to
+; use monotonic for spinlock-like constructs.)
+define i32 @test2(i32* nocapture %y) nounwind uwtable ssp {
+entry:
+  br label %loop
+
+loop:
+  %val = load atomic i32* %y monotonic, align 4
+  %exitcond = icmp ne i32 %val, 0
+  br i1 %exitcond, label %end, label %loop
+
+end:
+  ret i32 %val
+; CHECK: define i32 @test2(
+; CHECK: load atomic
+; CHECK-NEXT: %exitcond = icmp ne
+; CHECK-NEXT: br i1 %exitcond, label %end, label %loop
+}
+
+; Check that we hoist unordered around monotonic.
+; (The noalias shouldn't be necessary in theory, but LICM isn't quite that
+; smart yet.)
+define i32 @test3(i32* nocapture noalias %x, i32* nocapture %y) nounwind uwtable ssp {
+entry:
+  br label %loop
+
+loop:
+  %vala = load atomic i32* %y monotonic, align 4
+  %valb = load atomic i32* %x unordered, align 4
+  %exitcond = icmp ne i32 %vala, %valb
+  br i1 %exitcond, label %end, label %loop
+
+end:
+  ret i32 %vala
+; CHECK: define i32 @test3(
+; CHECK: load atomic i32* %x unordered
+; CHECK-NEXT: br label %loop
+}
+
+; Don't try to "sink" unordered stores yet; it is legal, but the machinery
+; isn't there.
+define i32 @test4(i32* nocapture noalias %x, i32* nocapture %y) nounwind uwtable ssp {
+entry:
+  br label %loop
+
+loop:
+  %vala = load atomic i32* %y monotonic, align 4
+  store atomic i32 %vala, i32* %x unordered, align 4
+  %exitcond = icmp ne i32 %vala, 0
+  br i1 %exitcond, label %end, label %loop
+
+end:
+  ret i32 %vala
+; CHECK: define i32 @test4(
+; CHECK: load atomic i32* %y monotonic
+; CHECK-NEXT: store atomic
+}





More information about the llvm-commits mailing list