[Mlir-commits] [mlir] [MLIR][Affine] Check dependences during MDG init (PR #156422)
Arnab Dutta
llvmlistbot at llvm.org
Mon Sep 8 20:07:50 PDT 2025
================
@@ -241,7 +242,96 @@ addNodeToMDG(Operation *nodeOp, MemRefDependenceGraph &mdg,
return &node;
}
-bool MemRefDependenceGraph::init() {
+/// Returns the memref being read/written by a memref/affine load/store op.
+static Value getMemRef(Operation *memOp) {
+  if (auto memrefLoad = dyn_cast<memref::LoadOp>(memOp))
+    return memrefLoad.getMemRef();
+  if (auto affineLoad = dyn_cast<AffineReadOpInterface>(memOp))
+    return affineLoad.getMemRef();
+  if (auto memrefStore = dyn_cast<memref::StoreOp>(memOp))
+    return memrefStore.getMemRef();
+  if (auto affineStore = dyn_cast<AffineWriteOpInterface>(memOp))
+    return affineStore.getMemRef();
+  llvm_unreachable("unexpected op");
+}
+
+/// Returns true if there may be a dependence on `memref` from srcNode's
+/// memory ops to dstNode's memory ops, while using the affine memory
+/// dependence analysis checks. The method assumes that there is at least one
+/// memory op in srcNode's loads and stores on `memref`, and similarly for
+/// `dstNode`. `srcNode.op` and `dstNode.op` are expected to be nested in the
+/// same block and so the dependences are tested at the depth of that block.
+static bool mayDependence(const Node &srcNode, const Node &dstNode,
+                          Value memref) {
+  assert(srcNode.op->getBlock() == dstNode.op->getBlock());
+  if (!isa<AffineForOp>(srcNode.op) || !isa<AffineForOp>(dstNode.op))
+    return true;
+
+  // Conservatively handle dependences involving non-affine load/stores. Return
+  // true if there exists a conflicting read/write access involving such ops.
+  auto hasNonAffineDep = [&](ArrayRef<Operation *> srcOps,
+                             ArrayRef<Operation *> dstOps) {
+    return llvm::any_of(srcOps, [&](Operation *srcOp) {
+      Value srcMemref = getMemRef(srcOp);
+      if (srcMemref != memref)
+        return false;
+      return llvm::find_if(dstOps, [&](Operation *dstOp) {
+               return srcMemref == getMemRef(dstOp);
+             }) != dstOps.end();
+    });
+  };
+
+  SmallVector<Operation *> dstOps;
+  // Between non-affine src stores and dst load/store.
+  llvm::append_range(dstOps, llvm::concat<Operation *const>(
+                                 dstNode.loads, dstNode.stores,
+                                 dstNode.memrefLoads, dstNode.memrefStores));
+  if (hasNonAffineDep(srcNode.memrefStores, dstOps))
+    return true;
+  // Between non-affine loads and dst stores.
+  dstOps.clear();
+  llvm::append_range(dstOps, llvm::concat<Operation *const>(
+                                 dstNode.stores, dstNode.memrefStores));
+  if (hasNonAffineDep(srcNode.memrefLoads, dstOps))
+    return true;
+  // Between affine stores and memref load/stores.
+  dstOps.clear();
+  llvm::append_range(dstOps, llvm::concat<Operation *const>(
+                                 dstNode.memrefLoads, dstNode.memrefStores));
+  if (hasNonAffineDep(srcNode.stores, dstOps))
+    return true;
+  // Between affine loads and memref stores.
+  dstOps.clear();
+  llvm::append_range(dstOps, llvm::concat<Operation *const>(
----------------
arnab-polymage wrote:
I do not get this. In the comment above you mention `Between affine loads and memref stores.`, but you're considering affine.stores inside destination nest. Why?
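For readers unfamiliar with the idiom used to build each `dstOps` list in the hunk: `llvm::concat` lazily chains several ranges and `llvm::append_range` copies the chained elements into a container (both are in llvm/ADT/STLExtras.h). A minimal standalone sketch, with `int` standing in for `Operation *` and a made-up function name:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

static void concatSketch() {
  llvm::SmallVector<int> loads = {1, 2};
  llvm::SmallVector<int> stores = {3, 4};
  llvm::SmallVector<int> combined;
  // Chain `loads` and `stores` without copying, then append the chained
  // elements into `combined`; it ends up holding {1, 2, 3, 4}.
  llvm::append_range(combined, llvm::concat<int>(loads, stores));
}

In the hunk the element type is written as `Operation *const` because the per-node load/store lists are reached through a `const Node &`.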
https://github.com/llvm/llvm-project/pull/156422