[Mlir-commits] [mlir] d7ac92f - [MLIR] Remove hardcoded usage of alloc/dealloc; use memory effects
Uday Bondhugula
llvmlistbot at llvm.org
Thu Aug 18 02:24:33 PDT 2022
Author: Uday Bondhugula
Date: 2022-08-18T14:46:03+05:30
New Revision: d7ac92f27b30f506b72281dd07880414b997fd72
URL: https://github.com/llvm/llvm-project/commit/d7ac92f27b30f506b72281dd07880414b997fd72
DIFF: https://github.com/llvm/llvm-project/commit/d7ac92f27b30f506b72281dd07880414b997fd72.diff
LOG: [MLIR] Remove hardcoded usage of alloc/dealloc; use memory effects
Remove hardcoded usage of memref.alloc/dealloc ops in affine utils; use memory
effects instead. This is NFC for existing test cases, but strictly more
general/powerful in terms of functionality: it allows other allocation and
freeing ops (like gpu.alloc/dealloc) to work in conjunction with affine ops and
utilities.
Differential Revision: https://reviews.llvm.org/D132011
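
For context, the core of the change is that "is this memref locally allocated?" becomes a single query against the side-effect interface instead of a list of known op names. A minimal standalone sketch of such a helper (the function name is hypothetical; hasSingleEffect and MemoryEffects::Allocate are real MLIR APIs from mlir/Interfaces/SideEffectInterfaces.h):

#include "mlir/Interfaces/SideEffectInterfaces.h"

using namespace mlir;

// Returns true if `value` is produced by an op whose sole memory effect on it
// is an allocation (memref.alloc, memref.alloca, gpu.alloc, ...). Any op
// implementing MemoryEffectOpInterface with a single Allocate effect on
// `value` qualifies; no dialect-specific isa<> checks are needed.
static bool definedByLocalAlloc(Value value) {
  Operation *defOp = value.getDefiningOp();
  return defOp && hasSingleEffect<MemoryEffects::Allocate>(defOp, value);
}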
Added:
Modified:
mlir/lib/Dialect/Affine/Utils/Utils.cpp
mlir/test/Dialect/Affine/scalrep.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index 80e830c9173bf..ae949b62a5279 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -659,15 +659,17 @@ LogicalResult mlir::normalizeAffineFor(AffineForOp op) {
 /// that would change the read within `memOp`.
 template <typename EffectType, typename T>
 static bool hasNoInterveningEffect(Operation *start, T memOp) {
-  Value memref = memOp.getMemRef();
-  bool isOriginalAllocation = memref.getDefiningOp<memref::AllocaOp>() ||
-                              memref.getDefiningOp<memref::AllocOp>();
+  auto isLocallyAllocated = [](Value memref) {
+    auto *defOp = memref.getDefiningOp();
+    return defOp && hasSingleEffect<MemoryEffects::Allocate>(defOp, memref);
+  };

   // A boolean representing whether an intervening operation could have impacted
   // memOp.
   bool hasSideEffect = false;

   // Check whether the effect on memOp can be caused by a given operation op.
+  Value memref = memOp.getMemRef();
   std::function<void(Operation *)> checkOperation = [&](Operation *op) {
     // If the effect has already been found, exit early.
     if (hasSideEffect)
@@ -682,12 +684,12 @@ static bool hasNoInterveningEffect(Operation *start, T memOp) {
         // If op causes EffectType on a potentially aliasing location for
         // memOp, mark as having the effect.
         if (isa<EffectType>(effect.getEffect())) {
-          if (isOriginalAllocation && effect.getValue() &&
-              (effect.getValue().getDefiningOp<memref::AllocaOp>() ||
-               effect.getValue().getDefiningOp<memref::AllocOp>())) {
-            if (effect.getValue() != memref)
-              continue;
-          }
+          // TODO: This should be replaced with a check for no aliasing.
+          // Aliasing information should be passed to this method.
+          if (effect.getValue() && effect.getValue() != memref &&
+              isLocallyAllocated(memref) &&
+              isLocallyAllocated(effect.getValue()))
+            continue;
           opMayHaveEffect = true;
           break;
         }
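
The hasSingleEffect<EffectTy>(op, value) helper used above is declared in mlir/Interfaces/SideEffectInterfaces.h. Roughly, it succeeds only when the op implements MemoryEffectOpInterface and every effect it reports is of type EffectTy on `value`. A simplified sketch, abridged rather than the library's exact implementation:

// Simplified sketch of the hasSingleEffect logic (assumption: abridged from
// the real definition in mlir/lib/Interfaces/SideEffectInterfaces.cpp).
template <typename EffectTy>
static bool hasSingleEffectSketch(Operation *op, Value value) {
  auto iface = dyn_cast<MemoryEffectOpInterface>(op);
  if (!iface)
    return false; // Without the interface, the op makes no effect guarantees.
  SmallVector<MemoryEffects::EffectInstance, 4> effects;
  iface.getEffects(effects);
  // Every reported effect must be of the requested kind and apply to `value`.
  return !effects.empty() &&
         llvm::all_of(effects, [&](const MemoryEffects::EffectInstance &e) {
           return e.getValue() == value && isa<EffectTy>(e.getEffect());
         });
}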
@@ -1058,18 +1060,20 @@ void mlir::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
   for (auto *op : opsToErase)
     op->erase();

-  // Check if the store fwd'ed memrefs are now left with only stores and can
-  // thus be completely deleted. Note: the canonicalize pass should be able
-  // to do this as well, but we'll do it here since we collected these anyway.
+  // Check if the store fwd'ed memrefs are now left with only stores and
+  // deallocs and can thus be completely deleted. Note: the canonicalize pass
+  // should be able to do this as well, but we'll do it here since we collected
+  // these anyway.
   for (auto memref : memrefsToErase) {
-    // If the memref hasn't been alloc'ed in this function, skip.
+    // If the memref hasn't been locally alloc'ed, skip.
     Operation *defOp = memref.getDefiningOp();
-    if (!defOp || !isa<memref::AllocOp>(defOp))
+    if (!defOp || !hasSingleEffect<MemoryEffects::Allocate>(defOp, memref))
       // TODO: if the memref was returned by a 'call' operation, we
       // could still erase it if the call had no side-effects.
       continue;

     if (llvm::any_of(memref.getUsers(), [&](Operation *ownerOp) {
-          return !isa<AffineWriteOpInterface, memref::DeallocOp>(ownerOp);
+          return !isa<AffineWriteOpInterface>(ownerOp) &&
+                 !hasSingleEffect<MemoryEffects::Free>(ownerOp, memref);
         }))
       continue;
@@ -1308,7 +1312,8 @@ LogicalResult mlir::replaceAllMemRefUsesWith(
     // Skip dealloc's - no replacement is necessary, and a memref replacement
     // at other uses doesn't hurt these dealloc's.
-    if (isa<memref::DeallocOp>(op) && !replaceInDeallocOp)
+    if (hasSingleEffect<MemoryEffects::Free>(op, oldMemRef) &&
+        !replaceInDeallocOp)
       continue;

     // Check if the memref was used in a non-dereferencing context. It is fine
@@ -1732,8 +1737,8 @@ LogicalResult mlir::normalizeMemRef(memref::AllocOp *allocOp) {
   }
   // Replace any uses of the original alloc op and erase it. All remaining uses
   // have to be dealloc's; RAMUW above would've failed otherwise.
-  assert(llvm::all_of(oldMemRef.getUsers(), [](Operation *op) {
-    return isa<memref::DeallocOp>(op);
+  assert(llvm::all_of(oldMemRef.getUsers(), [&](Operation *op) {
+    return hasSingleEffect<MemoryEffects::Free>(op, oldMemRef);
   }));
   oldMemRef.replaceAllUsesWith(newAlloc);
   allocOp->erase();
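
A practical consequence of the Utils.cpp changes above: any op that reports a single MemoryEffects::Free on the memref is now treated like memref.dealloc by these utilities. A hypothetical out-of-tree op could opt in by implementing the interface method along these lines (a sketch only; in-tree ops normally declare effects in ODS, e.g. via MemoryEffects<[MemFree]>, rather than writing this by hand):

// Hypothetical op `mydialect.release %buf : memref<...>`; the op and its
// getMemref() accessor are illustrative, not part of MLIR.
void ReleaseOp::getEffects(
    SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
  // Report exactly one Free effect on the released memref so that
  // hasSingleEffect<MemoryEffects::Free>(op, memref) returns true and the
  // affine utilities handle this op like memref.dealloc.
  effects.emplace_back(MemoryEffects::Free::get(), getMemref());
}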
diff --git a/mlir/test/Dialect/Affine/scalrep.mlir b/mlir/test/Dialect/Affine/scalrep.mlir
index 8adf49b6fb58d..867b3f68029cf 100644
--- a/mlir/test/Dialect/Affine/scalrep.mlir
+++ b/mlir/test/Dialect/Affine/scalrep.mlir
@@ -15,21 +15,21 @@ func.func @simple_store_load() {
     %v0 = affine.load %m[%i0] : memref<10xf32>
     %v1 = arith.addf %v0, %v0 : f32
   }
+  memref.dealloc %m : memref<10xf32>
   return
-// CHECK:       %{{.*}} = arith.constant 7.000000e+00 : f32
+// CHECK:       %[[C7:.*]] = arith.constant 7.000000e+00 : f32
 // CHECK-NEXT:  affine.for %{{.*}} = 0 to 10 {
-// CHECK-NEXT:    %{{.*}} = arith.addf %{{.*}}, %{{.*}} : f32
+// CHECK-NEXT:    arith.addf %[[C7]], %[[C7]] : f32
 // CHECK-NEXT:  }
 // CHECK-NEXT:  return
 }

 // CHECK-LABEL: func @multi_store_load() {
 func.func @multi_store_load() {
-  %c0 = arith.constant 0 : index
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
   %cf9 = arith.constant 9.0 : f32
-  %m = memref.alloc() : memref<10xf32>
+  %m = gpu.alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
     affine.store %cf7, %m[%i0] : memref<10xf32>
     %v0 = affine.load %m[%i0] : memref<10xf32>
@@ -40,17 +40,16 @@ func.func @multi_store_load() {
     %v3 = affine.load %m[%i0] : memref<10xf32>
     %v4 = arith.mulf %v2, %v3 : f32
   }
+  gpu.dealloc %m : memref<10xf32>
   return
-// CHECK:       %{{.*}} = arith.constant 0 : index
-// CHECK-NEXT:  %{{.*}} = arith.constant 7.000000e+00 : f32
-// CHECK-NEXT:  %{{.*}} = arith.constant 8.000000e+00 : f32
-// CHECK-NEXT:  %{{.*}} = arith.constant 9.000000e+00 : f32
+// CHECK-NEXT:  %[[C7:.*]] = arith.constant 7.000000e+00 : f32
+// CHECK-NEXT:  arith.constant 8.000000e+00 : f32
+// CHECK-NEXT:  %[[C9:.*]] = arith.constant 9.000000e+00 : f32
 // CHECK-NEXT:  affine.for %{{.*}} = 0 to 10 {
-// CHECK-NEXT:    %{{.*}} = arith.addf %{{.*}}, %{{.*}} : f32
-// CHECK-NEXT:    %{{.*}} = arith.mulf %{{.*}}, %{{.*}} : f32
+// CHECK-NEXT:    arith.addf %[[C7]], %[[C7]] : f32
+// CHECK-NEXT:    arith.mulf %[[C9]], %[[C9]] : f32
 // CHECK-NEXT:  }
 // CHECK-NEXT:  return
-
 }

 // The store-load forwarding can see through affine apply's since it relies on
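
The CHECK expectations above are verified by FileCheck when the file's RUN line invokes mlir-opt with the affine scalar replacement pass (registered as -affine-scalrep); the gpu.alloc/gpu.dealloc pair in @multi_store_load is what exercises the new effect-based detection.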