[llvm] r321382 - [MemorySSA] Allow reordering of loads that alias in the presence of volatile loads.

Alina Sbirlea via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 22 11:54:03 PST 2017


Author: asbirlea
Date: Fri Dec 22 11:54:03 2017
New Revision: 321382

URL: http://llvm.org/viewvc/llvm-project?rev=321382&view=rev
Log:
[MemorySSA] Allow reordering of loads that alias in the presence of volatile loads.

Summary:
Make MemorySSA allow reordering of two loads that may alias, when one is volatile.
This makes MemorySSA less conservative, behaving the same as the AliasSetTracker.
For more context, see D16875.

LLVM language reference: "The optimizers must not change the number of volatile operations or change their order of execution relative to other volatile operations. The optimizers may change the order of volatile operations relative to non-volatile operations. This is not Java’s “volatile” and has no cross-thread synchronization behavior."

Reviewers: george.burgess.iv, dberlin

Subscribers: sanjoy, reames, hfinkel, llvm-commits, Prazek

Differential Revision: https://reviews.llvm.org/D41525

Modified:
    llvm/trunk/lib/Analysis/MemorySSA.cpp
    llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll

Modified: llvm/trunk/lib/Analysis/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/MemorySSA.cpp?rev=321382&r1=321381&r2=321382&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/MemorySSA.cpp (original)
+++ llvm/trunk/lib/Analysis/MemorySSA.cpp Fri Dec 22 11:54:03 2017
@@ -192,8 +192,6 @@ template <> struct DenseMapInfo<MemoryLo
   }
 };
 
-enum class Reorderability { Always, IfNoAlias, Never };
-
 } // end namespace llvm
 
 /// This does one-way checks to see if Use could theoretically be hoisted above
@@ -202,22 +200,16 @@ enum class Reorderability { Always, IfNo
 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
 /// MayClobber, with no potentially clobbering operations in between them.
 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
-static Reorderability getLoadReorderability(const LoadInst *Use,
-                                            const LoadInst *MayClobber) {
+static bool areLoadsReorderable(const LoadInst *Use,
+                                const LoadInst *MayClobber) {
   bool VolatileUse = Use->isVolatile();
   bool VolatileClobber = MayClobber->isVolatile();
   // Volatile operations may never be reordered with other volatile operations.
   if (VolatileUse && VolatileClobber)
-    return Reorderability::Never;
-
-  // The lang ref allows reordering of volatile and non-volatile operations.
-  // Whether an aliasing nonvolatile load and volatile load can be reordered,
-  // though, is ambiguous. Because it may not be best to exploit this ambiguity,
-  // we only allow volatile/non-volatile reordering if the volatile and
-  // non-volatile operations don't alias.
-  Reorderability Result = VolatileUse || VolatileClobber
-                              ? Reorderability::IfNoAlias
-                              : Reorderability::Always;
+    return false;
+  // Otherwise, volatile doesn't matter here. From the language reference:
+  // 'optimizers may change the order of volatile operations relative to
+  // non-volatile operations.'
 
   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
   // is weaker, it can be moved above other loads. We just need to be sure that
@@ -229,9 +221,7 @@ static Reorderability getLoadReorderabil
   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                      AtomicOrdering::Acquire);
-  if (SeqCstUse || MayClobberIsAcquire)
-    return Reorderability::Never;
-  return Result;
+  return !(SeqCstUse || MayClobberIsAcquire);
 }
 
 static bool instructionClobbersQuery(MemoryDef *MD,
@@ -265,18 +255,9 @@ static bool instructionClobbersQuery(Mem
     return isModOrRefSet(I);
   }
 
-  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
-    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) {
-      switch (getLoadReorderability(UseLoad, DefLoad)) {
-      case Reorderability::Always:
-        return false;
-      case Reorderability::Never:
-        return true;
-      case Reorderability::IfNoAlias:
-        return !AA.isNoAlias(UseLoc, MemoryLocation::get(DefLoad));
-      }
-    }
-  }
+  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
+    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
+      return !areLoadsReorderable(UseLoad, DefLoad);
 
   return isModSet(AA.getModRefInfo(DefInst, UseLoc));
 }

Modified: llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll?rev=321382&r1=321381&r2=321382&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll (original)
+++ llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll Fri Dec 22 11:54:03 2017
@@ -22,8 +22,7 @@ define i32 @foo() {
   ret i32 %4
 }
 
-; Ensuring that we don't automatically hoist nonvolatile loads around volatile
-; loads
+; Ensuring we allow hoisting nonvolatile loads around volatile loads.
 ; CHECK-LABEL: define void @volatile_only
 define void @volatile_only(i32* %arg1, i32* %arg2) {
   ; Trivially NoAlias/MustAlias
@@ -36,7 +35,7 @@ define void @volatile_only(i32* %arg1, i
 ; CHECK: MemoryUse(liveOnEntry)
 ; CHECK-NEXT: load i32, i32* %b
   load i32, i32* %b
-; CHECK: MemoryUse(1)
+; CHECK: MemoryUse(liveOnEntry)
 ; CHECK-NEXT: load i32, i32* %a
   load i32, i32* %a
 
@@ -44,7 +43,7 @@ define void @volatile_only(i32* %arg1, i
 ; CHECK: 2 = MemoryDef(1)
 ; CHECK-NEXT: load volatile i32, i32* %arg1
   load volatile i32, i32* %arg1
-; CHECK: MemoryUse(2)
+; CHECK: MemoryUse(liveOnEntry)
 ; CHECK-NEXT: load i32, i32* %arg2
   load i32, i32* %arg2
 
@@ -75,10 +74,10 @@ define void @volatile_atomics(i32* %arg1
 ; CHECK: MemoryUse(1)
 ; CHECK-NEXT: load atomic i32, i32* %b unordered, align 4
   load atomic i32, i32* %b unordered, align 4
-; CHECK: MemoryUse(2)
+; CHECK: MemoryUse(1)
 ; CHECK-NEXT: load atomic i32, i32* %a unordered, align 4
   load atomic i32, i32* %a unordered, align 4
-; CHECK: MemoryUse(2)
+; CHECK: MemoryUse(1)
 ; CHECK-NEXT: load i32, i32* %a
   load i32, i32* %a
 
@@ -86,7 +85,7 @@ define void @volatile_atomics(i32* %arg1
 ; CHECK: 3 = MemoryDef(2)
 ; CHECK-NEXT: load atomic volatile i32, i32* %arg1 monotonic, align 4
   load atomic volatile i32, i32* %arg1 monotonic, align 4
-; CHECK: MemoryUse(3)
+; CHECK: MemoryUse(1)
 ; CHECK-NEXT: load i32, i32* %arg2
   load i32, i32* %arg2
 




More information about the llvm-commits mailing list