[Mlir-commits] [mlir] 0cd8422 - [MLIR] Eliminate unnecessary affine stores

William S. Moses llvmlistbot at llvm.org
Wed Jun 30 06:45:50 PDT 2021


Author: William S. Moses
Date: 2021-06-30T09:45:26-04:00
New Revision: 0cd8422e8caa4aeccbf9b9c92c9acb23a4ffa9c3

URL: https://github.com/llvm/llvm-project/commit/0cd8422e8caa4aeccbf9b9c92c9acb23a4ffa9c3
DIFF: https://github.com/llvm/llvm-project/commit/0cd8422e8caa4aeccbf9b9c92c9acb23a4ffa9c3.diff

LOG: [MLIR] Eliminate unnecessary affine stores

Deduce circumstances where an affine store could not possibly be read by any operation (such as an affine load), and if so, eliminate the store.

Differential Revision: https://reviews.llvm.org/D105041
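
A minimal before/after sketch of the rewrite, mirroring the redundant_store_elim test added below (illustration only, not part of the commit; %cf1, %cf2 and %out are as in that test):

    // Before: the first store is overwritten before it can ever be read.
    affine.for %i = 0 to 16 {
      affine.store %cf1, %out[32*%i] : memref<512xf32>
      affine.store %cf2, %out[32*%i] : memref<512xf32>
    }

    // After affine scalar replacement: only the postdominating store remains.
    affine.for %i = 0 to 16 {
      affine.store %cf2, %out[32*%i] : memref<512xf32>
    }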

Added: 
    

Modified: 
    mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
    mlir/test/Dialect/Affine/scalrep.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
index 5be0dcdaea157..b6cce790f715d 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
@@ -68,6 +68,11 @@ struct AffineScalarReplacement
   void loadCSE(AffineReadOpInterface loadOp,
                SmallVectorImpl<Operation *> &loadOpsToErase,
                DominanceInfo &domInfo);
+
+  void findUnusedStore(AffineWriteOpInterface storeOp,
+                       SmallVectorImpl<Operation *> &storeOpsToErase,
+                       SmallPtrSetImpl<Value> &memrefsToErase,
+                       PostDominanceInfo &postDominanceInfo);
 };
 
 } // end anonymous namespace
@@ -256,6 +261,51 @@ bool hasNoInterveningEffect(Operation *start, T memOp) {
   return !hasSideEffect;
 }
 
+// This attempts to find stores which have no impact on the final result.
+// A writing op writeA is eliminated if there exists an op writeB such that:
+// 1) writeA and writeB have mathematically equivalent affine access functions.
+// 2) writeB postdominates writeA.
+// 3) There is no potential read between writeA and writeB.
+void AffineScalarReplacement::findUnusedStore(
+    AffineWriteOpInterface writeA, SmallVectorImpl<Operation *> &opsToErase,
+    SmallPtrSetImpl<Value> &memrefsToErase,
+    PostDominanceInfo &postDominanceInfo) {
+
+  for (Operation *user : writeA.getMemRef().getUsers()) {
+    // Only consider writing operations.
+    auto writeB = dyn_cast<AffineWriteOpInterface>(user);
+    if (!writeB)
+      continue;
+
+    // The operations must be distinct.
+    if (writeB == writeA)
+      continue;
+
+    // Both operations must lie in the same region.
+    if (writeB->getParentRegion() != writeA->getParentRegion())
+      continue;
+
+    // Both operations must write to the same memory.
+    MemRefAccess srcAccess(writeB);
+    MemRefAccess destAccess(writeA);
+
+    if (srcAccess != destAccess)
+      continue;
+
+    // writeB must postdominate writeA.
+    if (!postDominanceInfo.postDominates(writeB, writeA))
+      continue;
+
+    // There cannot be an operation which reads from memory between
+    // the two writes.
+    if (!hasNoInterveningEffect<MemoryEffects::Read>(writeA, writeB))
+      continue;
+
+    opsToErase.push_back(writeA);
+    break;
+  }
+}
+
 /// Attempt to eliminate loadOp by replacing it with a value stored into memory
 /// which the load is guaranteed to retrieve. This check involves three
 /// components: 1) The store and load must be on the same location 2) The store
@@ -394,6 +444,7 @@ void AffineScalarReplacement::runOnFunction() {
   SmallPtrSet<Value, 4> memrefsToErase;
 
   auto &domInfo = getAnalysis<DominanceInfo>();
+  auto &postDomInfo = getAnalysis<PostDominanceInfo>();
 
   // Walk all load's and perform store to load forwarding.
   f.walk([&](AffineReadOpInterface loadOp) {
@@ -404,6 +455,15 @@ void AffineScalarReplacement::runOnFunction() {
   });
 
   // Erase all load op's whose results were replaced with store fwd'ed ones.
+  for (auto *op : opsToErase)
+    op->erase();
+  opsToErase.clear();
+
+  // Walk all stores and perform unused store elimination.
+  f.walk([&](AffineWriteOpInterface storeOp) {
+    findUnusedStore(storeOp, opsToErase, memrefsToErase, postDomInfo);
+  });
+  // Erase all store ops that do not impact the program.
   for (auto *op : opsToErase)
     op->erase();
 

diff --git a/mlir/test/Dialect/Affine/scalrep.mlir b/mlir/test/Dialect/Affine/scalrep.mlir
index 452ff0939a185..308186fe676db 100644
--- a/mlir/test/Dialect/Affine/scalrep.mlir
+++ b/mlir/test/Dialect/Affine/scalrep.mlir
@@ -642,3 +642,38 @@ func @overlap_no_fwd(%N : index) -> f32 {
 // CHECK-NEXT:  return %{{.*}} : f32
 }
 
+// CHECK-LABEL: func @redundant_store_elim
+
+func @redundant_store_elim(%out : memref<512xf32>) {
+  %cf1 = constant 1.0 : f32
+  %cf2 = constant 2.0 : f32
+  affine.for %i = 0 to 16 {
+    affine.store %cf1, %out[32*%i] : memref<512xf32>
+    affine.store %cf2, %out[32*%i] : memref<512xf32>
+  }
+  return
+}
+
+// CHECK: affine.for
+// CHECK-NEXT:   affine.store
+// CHECK-NEXT: }
+
+// CHECK-LABEL: func @redundant_store_elim_fail
+
+func @redundant_store_elim_fail(%out : memref<512xf32>) {
+  %cf1 = constant 1.0 : f32
+  %cf2 = constant 2.0 : f32
+  affine.for %i = 0 to 16 {
+    affine.store %cf1, %out[32*%i] : memref<512xf32>
+    "test.use"(%out) : (memref<512xf32>) -> ()
+    affine.store %cf2, %out[32*%i] : memref<512xf32>
+  }
+  return
+}
+
+// CHECK: affine.for
+// CHECK-NEXT:   affine.store
+// CHECK-NEXT:   "test.use"
+// CHECK-NEXT:   affine.store
+// CHECK-NEXT: }
+
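
For illustration of condition 2 above (writeB must postdominate writeA), a hedged sketch of a case the new findUnusedStore leaves alone: the second store only executes for part of the iteration space, so it does not postdominate the first store (and lies in a different region), and the first store is kept. The function name and affine set below are illustrative, not part of the patch or the test suite:

    #set = affine_set<(d0) : (d0 - 8 >= 0)>

    func @illustrative_keep_store(%out : memref<512xf32>) {
      %cf1 = constant 1.0 : f32
      %cf2 = constant 2.0 : f32
      affine.for %i = 0 to 16 {
        // Kept: the conditional store below does not postdominate this one.
        affine.store %cf1, %out[%i] : memref<512xf32>
        affine.if #set(%i) {
          affine.store %cf2, %out[%i] : memref<512xf32>
        }
      }
      return
    }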


        

