[llvm] r266999 - [unordered] Add tests and conservative handling in support of future changes [NFCI]

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 21 09:51:08 PDT 2016


Author: reames
Date: Thu Apr 21 11:51:08 2016
New Revision: 266999

URL: http://llvm.org/viewvc/llvm-project?rev=266999&view=rev
Log:
[unordered] Add tests and conservative handling in support of future changes [NFCI]

This change adds a couple of test cases to make sure FindAvailableLoadedValue does the right thing.  At the moment, the code added is dead, but separating it makes follow on changes far more obvious.


Modified:
    llvm/trunk/lib/Analysis/Loads.cpp
    llvm/trunk/test/Transforms/InstCombine/atomic.ll

Modified: llvm/trunk/lib/Analysis/Loads.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/Loads.cpp?rev=266999&r1=266998&r2=266999&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/Loads.cpp (original)
+++ llvm/trunk/lib/Analysis/Loads.cpp Thu Apr 21 11:51:08 2016
@@ -416,6 +416,14 @@ Value *llvm::FindAvailableLoadedValue(Lo
   Value *Ptr = Load->getPointerOperand();
   Type *AccessTy = Load->getType();
 
+  // We can never remove a volatile load
+  if (Load->isVolatile())
+    return nullptr;
+
+  // Anything stronger than unordered is currently unimplemented.
+  if (!Load->isUnordered())
+    return nullptr;
+
   const DataLayout &DL = ScanBB->getModule()->getDataLayout();
 
   // Try to get the store size for the type.
@@ -445,6 +453,12 @@ Value *llvm::FindAvailableLoadedValue(Lo
       if (AreEquivalentAddressValues(
               LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
           CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
+
+        // We can value forward from an atomic to a non-atomic, but not the
+        // other way around.
+        if (LI->isAtomic() < Load->isAtomic())
+          return nullptr;
+
         if (AATags)
           LI->getAAMetadata(*AATags);
         return LI;
@@ -458,6 +472,12 @@ Value *llvm::FindAvailableLoadedValue(Lo
       if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
           CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                                AccessTy, DL)) {
+
+        // We can value forward from an atomic to a non-atomic, but not the
+        // other way around.
+        if (SI->isAtomic() < Load->isAtomic())
+          return nullptr;
+
         if (AATags)
           SI->getAAMetadata(*AATags);
         return SI->getOperand(0);

Modified: llvm/trunk/test/Transforms/InstCombine/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/atomic.ll?rev=266999&r1=266998&r2=266999&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/atomic.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/atomic.ll Thu Apr 21 11:51:08 2016
@@ -5,12 +5,58 @@ target triple = "x86_64-apple-macosx10.7
 
 ; Check transforms involving atomic operations
 
+define i32 @test1(i32* %p) {
+; CHECK-LABEL: define i32 @test1(
+; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
+; CHECK: shl i32 %x, 1
+  %x = load atomic i32, i32* %p seq_cst, align 4
+  %y = load i32, i32* %p, align 4
+  %z = add i32 %x, %y
+  ret i32 %z
+}
+
 define i32 @test2(i32* %p) {
 ; CHECK-LABEL: define i32 @test2(
+; CHECK: %x = load volatile i32, i32* %p, align 4
+; CHECK: %y = load volatile i32, i32* %p, align 4
+  %x = load volatile i32, i32* %p, align 4
+  %y = load volatile i32, i32* %p, align 4
+  %z = add i32 %x, %y
+  ret i32 %z
+}
+
+; The exact semantics of mixing volatile and non-volatile on the same
+; memory location are a bit unclear, but conservatively, we know we don't
+; want to remove the volatile.
+define i32 @test3(i32* %p) {
+; CHECK-LABEL: define i32 @test3(
+; CHECK: %x = load volatile i32, i32* %p, align 4
+  %x = load volatile i32, i32* %p, align 4
+  %y = load i32, i32* %p, align 4
+  %z = add i32 %x, %y
+  ret i32 %z
+}
+
+; FIXME: Forwarding from a stronger atomic is fine
+define i32 @test4(i32* %p) {
+; CHECK-LABEL: define i32 @test4(
 ; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
-; CHECK: shl i32 %x, 1
+; CHECK: %y = load atomic i32, i32* %p unordered, align 4
   %x = load atomic i32, i32* %p seq_cst, align 4
+  %y = load atomic i32, i32* %p unordered, align 4
+  %z = add i32 %x, %y
+  ret i32 %z
+}
+
+; Forwarding from a non-atomic is not.  (The earlier load
+; could in principle be promoted to atomic and then forwarded,
+; but we can't just drop the atomic from the load.)
+define i32 @test5(i32* %p) {
+; CHECK-LABEL: define i32 @test5(
+; CHECK: %x = load atomic i32, i32* %p unordered, align 4
+  %x = load atomic i32, i32* %p unordered, align 4
   %y = load i32, i32* %p, align 4
   %z = add i32 %x, %y
   ret i32 %z
 }
+




More information about the llvm-commits mailing list