[llvm] r255739 - [EarlyCSE] DSE of stores which write back loaded values

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 15 17:01:31 PST 2015


Author: reames
Date: Tue Dec 15 19:01:30 2015
New Revision: 255739

URL: http://llvm.org/viewvc/llvm-project?rev=255739&view=rev
Log:
[EarlyCSE] DSE of stores which write back loaded values

Extend EarlyCSE with an additional form of dead store elimination: if we store back a value we just loaded from the same memory location, we can eliminate the store, since we know the value in memory hasn't changed.
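
For illustration, the pattern being eliminated looks like this (a minimal
sketch; the actual regression tests are in the diff below):

  %v = load i32, i32* %P
  store i32 %v, i32* %P    ; dead: writes back the value just loaded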

I'm implementing this mostly because I noticed the omission while reading the code. It seemed strange for InstCombine to have a peephole more powerful than EarlyCSE. :)

Differential Revision: http://reviews.llvm.org/D15397


Modified:
    llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
    llvm/trunk/test/Transforms/EarlyCSE/basic.ll

Modified: llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp?rev=255739&r1=255738&r2=255739&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp Tue Dec 15 19:01:30 2015
@@ -687,6 +687,33 @@ bool EarlyCSE::processNode(DomTreeNode *
         continue;
       }
 
+    // Write-back DSE - if we write back the same value we just loaded from
+    // the same location, and haven't passed any intervening writes or
+    // ordering operations, we can remove the store.  The primary benefit is
+    // that the available-load table remains valid, so loads can continue to
+    // be value-forwarded past the point where the store originally was.
+    if (MemInst.isValid() && MemInst.isStore()) {
+      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
+      if (InVal.Data &&
+          InVal.Data == getOrCreateResult(Inst, InVal.Data->getType()) &&
+          InVal.Generation == CurrentGeneration &&
+          InVal.MatchingId == MemInst.getMatchingId() &&
+          // We don't yet handle removing stores with ordering of any kind.
+          !MemInst.isVolatile() && MemInst.isUnordered()) {
+        assert((!LastStore ||
+                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
+                MemInst.getPointerOperand()) &&
+               "can't have an intervening store!");
+        DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
+        Inst->eraseFromParent();
+        Changed = true;
+        ++NumDSE;
+        // We can avoid incrementing the generation count since we were able
+        // to eliminate this store.
+        continue;
+      }
+    }
+
     // Okay, this isn't something we can CSE at all.  Check to see if it is
     // something that could modify memory.  If so, our available memory values
     // cannot be used so bump the generation count.
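
To make the "primary benefit" comment above concrete, consider the pattern
exercised by @dse4 below.  Had the store survived, it would have bumped the
generation count and invalidated the available-load entry for %Q, forcing %b
to be reloaded; with the store deleted (and the generation left alone), %b is
CSE'd to %a and the subtraction folds to 0:

  %a = load i32, i32* %Q
  %v = load atomic i32, i32* %P unordered, align 4
  store atomic i32 %v, i32* %P unordered, align 4  ; removed by this patch
  %b = load i32, i32* %Q                           ; CSE'd to %a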

Modified: llvm/trunk/test/Transforms/EarlyCSE/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/EarlyCSE/basic.ll?rev=255739&r1=255738&r2=255739&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/EarlyCSE/basic.ll (original)
+++ llvm/trunk/test/Transforms/EarlyCSE/basic.ll Tue Dec 15 19:01:30 2015
@@ -203,3 +203,77 @@ define i32 @test12(i1 %B, i32* %P1, i32*
   ; CHECK: load i32, i32* %P1
   ; CHECK: load i32, i32* %P1
 }
+
+define void @dse1(i32 *%P) {
+; CHECK-LABEL: @dse1
+; CHECK-NOT: store
+  %v = load i32, i32* %P
+  store i32 %v, i32* %P
+  ret void
+}
+
+define void @dse2(i32 *%P) {
+; CHECK-LABEL: @dse2
+; CHECK-NOT: store
+  %v = load atomic i32, i32* %P seq_cst, align 4
+  store i32 %v, i32* %P
+  ret void
+}
+
+define void @dse3(i32 *%P) {
+; CHECK-LABEL: @dse3
+; CHECK-NOT: store
+  %v = load atomic i32, i32* %P seq_cst, align 4
+  store atomic i32 %v, i32* %P unordered, align 4
+  ret void
+}
+
+define i32 @dse4(i32 *%P, i32 *%Q) {
+; CHECK-LABEL: @dse4
+; CHECK-NOT: store
+; CHECK: ret i32 0
+  %a = load i32, i32* %Q
+  %v = load atomic i32, i32* %P unordered, align 4
+  store atomic i32 %v, i32* %P unordered, align 4
+  %b = load i32, i32* %Q
+  %res = sub i32 %a, %b
+  ret i32 %res
+}
+
+; Note that in this example, %P and %Q could in fact be the same
+; pointer.  %v could be different from the value observed for %a,
+; and that's okay because we're using relaxed memory ordering.
+; The only guarantee we have to provide is that each of the loads
+; observes some value written to that location.  We do not have to
+; respect the order in which those writes were done.
+define i32 @dse5(i32 *%P, i32 *%Q) {
+; CHECK-LABEL: @dse5
+; CHECK-NOT: store
+; CHECK: ret i32 0
+  %v = load atomic i32, i32* %P unordered, align 4
+  %a = load atomic i32, i32* %Q unordered, align 4
+  store atomic i32 %v, i32* %P unordered, align 4
+  %b = load atomic i32, i32* %Q unordered, align 4
+  %res = sub i32 %a, %b
+  ret i32 %res
+}
+
+
+define void @dse_neg1(i32 *%P) {
+; CHECK-LABEL: @dse_neg1
+; CHECK: store
+  %v = load i32, i32* %P
+  store i32 5, i32* %P
+  ret void
+}
+
+; We could remove the store, but only if its seq_cst ordering
+; effect were somehow preserved.
+define void @dse_neg2(i32 *%P) {
+; CHECK-LABEL: @dse_neg2
+; CHECK: store
+  %v = load i32, i32* %P
+  store atomic i32 %v, i32* %P seq_cst, align 4
+  ret void
+}
+
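
For reference, the new tests are driven by the file's existing RUN line,
which falls outside the diff context above; assuming it follows the usual
pattern for EarlyCSE tests of this era, it reads along the lines of:

  ; RUN: opt < %s -S -early-cse | FileCheck %s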