[Mlir-commits] [mlir] [mlir][bufferization] Remove `finalizing-bufferize` pass (PR #114154)

Matthias Springer llvmlistbot at llvm.org
Wed Nov 20 17:38:40 PST 2024


https://github.com/matthias-springer updated https://github.com/llvm/llvm-project/pull/114154

From 1beeac3bf2e1c16ed9b4f92e6d0596f498074f19 Mon Sep 17 00:00:00 2001
From: Matthias Springer <mspringer at nvidia.com>
Date: Tue, 29 Oct 2024 09:51:11 +0100
Subject: [PATCH] [mlir][bufferization] Remove `finalizing-bufferize` pass

The dialect conversion-based bufferization passes were migrated to One-Shot Bufferize about two years ago. To clean up the code base, this commit removes the `finalizing-bufferize` pass, one of the few remaining parts of the old infrastructure. Most of the old bufferization passes have already been removed.

Note for LLVM integration: If you depend on this pass, migrate to One-Shot Bufferize or copy the pass to your codebase.
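
For downstream pipelines, a minimal migration sketch could look like the following. The pipeline function name is made up for illustration, and the exact One-Shot Bufferize options depend on your pipeline:

  #include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
  #include "mlir/Dialect/Bufferization/Transforms/Passes.h"
  #include "mlir/Pass/PassManager.h"

  using namespace mlir;

  // Hypothetical downstream pipeline: bufferize all ops that implement
  // BufferizableOpInterface in one shot, instead of running a partial
  // bufferization followed by -finalizing-bufferize.
  static void buildMyBufferizationPipeline(OpPassManager &pm) {
    bufferization::OneShotBufferizationOptions options;
    options.bufferizeFunctionBoundaries = true;
    pm.addPass(bufferization::createOneShotBufferizePass(options));
    // After a complete one-shot bufferization there should be no leftover
    // bufferization.to_tensor / bufferization.to_memref pairs, so no
    // separate finalizing step is needed.
  }

Alternatively, the removed pass is small and self-contained (see the deleted Bufferize.cpp hunk below), so copying it into a downstream project is also an option.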

Depends on #114152.
---
 .../Bufferization/Transforms/Bufferize.h      |  6 --
 .../Dialect/Bufferization/Transforms/Passes.h |  4 -
 .../Bufferization/Transforms/Passes.td        | 16 ----
 .../Bufferization/Transforms/Bufferize.cpp    | 75 ---------------
 .../Pipelines/SparseTensorPipelines.cpp       |  2 -
 .../Transforms/finalizing-bufferize.mlir      | 95 -------------------
 6 files changed, 198 deletions(-)
 delete mode 100644 mlir/test/Dialect/Bufferization/Transforms/finalizing-bufferize.mlir

diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h
index 1603dfcbae5589..ebed2c354bfca5 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h
@@ -56,12 +56,6 @@ class BufferizeTypeConverter : public TypeConverter {
 /// populateEliminateBufferizeMaterializationsPatterns.
 void populateBufferizeMaterializationLegality(ConversionTarget &target);
 
-/// Populate patterns to eliminate bufferize materializations.
-///
-/// In particular, these are the tensor_load/buffer_cast ops.
-void populateEliminateBufferizeMaterializationsPatterns(
-    const BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns);
-
 /// Bufferize `op` and its nested ops that implement `BufferizableOpInterface`.
 ///
 /// Note: This function does not resolve read-after-write conflicts. Use this
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
index ab9a48f3473c27..fe43a05c81fdc3 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
@@ -200,10 +200,6 @@ std::unique_ptr<Pass> createEmptyTensorToAllocTensorPass();
 /// Drop all memref function results that are equivalent to a function argument.
 LogicalResult dropEquivalentBufferResults(ModuleOp module);
 
-/// Creates a pass that finalizes a partial bufferization by removing remaining
-/// bufferization.to_tensor and bufferization.to_memref operations.
-std::unique_ptr<OperationPass<func::FuncOp>> createFinalizingBufferizePass();
-
 /// Create a pass that bufferizes all ops that implement BufferizableOpInterface
 /// with One-Shot Bufferize.
 std::unique_ptr<Pass> createOneShotBufferizePass();
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
index 2743de43fb9cfa..3e93f33ffe0fb4 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
@@ -343,22 +343,6 @@ def BufferResultsToOutParams : Pass<"buffer-results-to-out-params", "ModuleOp">
   let dependentDialects = ["memref::MemRefDialect"];
 }
 
-def FinalizingBufferize : Pass<"finalizing-bufferize", "func::FuncOp"> {
-  let summary = "Finalize a partial bufferization";
-  let description = [{
-    A bufferize pass that finalizes a partial bufferization by removing
-    remaining `bufferization.to_tensor` and `bufferization.to_buffer` operations.
-
-    The removal of those operations is only possible if the operations only
-    exist in pairs, i.e., all uses of `bufferization.to_tensor` operations are
-    `bufferization.to_buffer` operations.
-
-    This pass will fail if not all operations can be removed or if any operation
-    with tensor typed operands remains.
-  }];
-  let constructor = "mlir::bufferization::createFinalizingBufferizePass()";
-}
-
 def DropEquivalentBufferResults : Pass<"drop-equivalent-buffer-results", "ModuleOp">  {
   let summary = "Remove MemRef return values that are equivalent to a bbArg";
   let description = [{
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
index 1d009b03754c52..62ce2583f4fa1d 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
@@ -26,7 +26,6 @@
 
 namespace mlir {
 namespace bufferization {
-#define GEN_PASS_DEF_FINALIZINGBUFFERIZE
 #define GEN_PASS_DEF_BUFFERIZATIONBUFFERIZE
 #define GEN_PASS_DEF_ONESHOTBUFFERIZE
 #include "mlir/Dialect/Bufferization/Transforms/Passes.h.inc"
@@ -98,75 +97,6 @@ void mlir::bufferization::populateBufferizeMaterializationLegality(
 }
 
 namespace {
-// In a finalizing bufferize conversion, we know that all tensors have been
-// converted to memrefs, thus, this op becomes an identity.
-class BufferizeToTensorOp
-    : public OpConversionPattern<bufferization::ToTensorOp> {
-public:
-  using OpConversionPattern::OpConversionPattern;
-  LogicalResult
-  matchAndRewrite(bufferization::ToTensorOp op, OpAdaptor adaptor,
-                  ConversionPatternRewriter &rewriter) const override {
-    rewriter.replaceOp(op, adaptor.getMemref());
-    return success();
-  }
-};
-} // namespace
-
-namespace {
-// In a finalizing bufferize conversion, we know that all tensors have been
-// converted to memrefs, thus, this op becomes an identity.
-class BufferizeToMemrefOp
-    : public OpConversionPattern<bufferization::ToMemrefOp> {
-public:
-  using OpConversionPattern::OpConversionPattern;
-  LogicalResult
-  matchAndRewrite(bufferization::ToMemrefOp op, OpAdaptor adaptor,
-                  ConversionPatternRewriter &rewriter) const override {
-    rewriter.replaceOp(op, adaptor.getTensor());
-    return success();
-  }
-};
-} // namespace
-
-void mlir::bufferization::populateEliminateBufferizeMaterializationsPatterns(
-    const BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns) {
-  patterns.add<BufferizeToTensorOp, BufferizeToMemrefOp>(typeConverter,
-                                                         patterns.getContext());
-}
-
-namespace {
-struct FinalizingBufferizePass
-    : public bufferization::impl::FinalizingBufferizeBase<
-          FinalizingBufferizePass> {
-  using FinalizingBufferizeBase<
-      FinalizingBufferizePass>::FinalizingBufferizeBase;
-
-  void runOnOperation() override {
-    auto func = getOperation();
-    auto *context = &getContext();
-
-    BufferizeTypeConverter typeConverter;
-    RewritePatternSet patterns(context);
-    ConversionTarget target(*context);
-
-    populateEliminateBufferizeMaterializationsPatterns(typeConverter, patterns);
-
-    // If all result types are legal, and all block arguments are legal (ensured
-    // by func conversion above), then all types in the program are legal.
-    //
-    // We also check that the operand types are legal to avoid creating invalid
-    // IR. For example, this prevents
-    // populateEliminateBufferizeMaterializationsPatterns from updating the
-    // types of the operands to a return op without updating the enclosing
-    // function.
-    target.markUnknownOpDynamicallyLegal(
-        [&](Operation *op) { return typeConverter.isLegal(op); });
-
-    if (failed(applyFullConversion(func, target, std::move(patterns))))
-      signalPassFailure();
-  }
-};
 
 static LayoutMapOption parseLayoutMapOption(const std::string &s) {
   if (s == "fully-dynamic-layout-map")
@@ -331,11 +261,6 @@ std::unique_ptr<Pass> mlir::bufferization::createOneShotBufferizePass(
   return std::make_unique<OneShotBufferizePass>(options);
 }
 
-std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::bufferization::createFinalizingBufferizePass() {
-  return std::make_unique<FinalizingBufferizePass>();
-}
-
 //===----------------------------------------------------------------------===//
 // BufferizableOpInterface-based Bufferization
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
index abc4a4c252841b..96ccf9a9a24087 100644
--- a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
@@ -55,8 +55,6 @@ void mlir::sparse_tensor::buildSparsifier(OpPassManager &pm,
   // Storage specifier lowering and bufferization wrap-up.
   pm.addPass(createStorageSpecifierToLLVMPass());
   pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
-  pm.addNestedPass<func::FuncOp>(
-      mlir::bufferization::createFinalizingBufferizePass());
 
   // GPU code generation.
   const bool gpuCodegen = options.gpuTriple.hasValue();
diff --git a/mlir/test/Dialect/Bufferization/Transforms/finalizing-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/finalizing-bufferize.mlir
deleted file mode 100644
index bae94c1be4da90..00000000000000
--- a/mlir/test/Dialect/Bufferization/Transforms/finalizing-bufferize.mlir
+++ /dev/null
@@ -1,95 +0,0 @@
-// RUN: mlir-opt %s -finalizing-bufferize -split-input-file -verify-diagnostics | FileCheck %s
-
-// CHECK-LABEL:   func @eliminate_materializations(
-// CHECK-SAME:                                     %[[ARG:.*]]: memref<f32>) -> memref<f32> {
-// CHECK:           return %[[ARG]] : memref<f32>
-func.func @eliminate_materializations(%arg0: memref<f32>) -> memref<f32> {
-  %0 = bufferization.to_tensor %arg0 : memref<f32>
-  %1 = bufferization.to_memref %0 : memref<f32>
-  return %1 : memref<f32>
-}
-
-// -----
-
-func.func @unable_to_convert_lone_buffer_cast() -> memref<f32> {
-  // expected-error @+1 {{failed to legalize operation 'test.source'}}
-  %0 = "test.source"() : () -> tensor<f32>
-  %1 = bufferization.to_memref %0 : memref<f32>
-  return %1 : memref<f32>
-}
-
-// -----
-
-func.func @unable_to_convert_lone_tensor_load(%arg0: memref<f32>) {
-  %0 = bufferization.to_tensor %arg0 : memref<f32>
-  // expected-error @+1 {{failed to legalize operation 'test.sink'}}
-  "test.sink"(%0) : (tensor<f32>) -> ()
-  return
-}
-
-// -----
-
-// CHECK-LABEL: func @dyn_layout_to_no_layout_cast(
-//  CHECK-SAME:     %[[arg:.*]]: memref<?xf32, strided<[1], offset: ?>>)
-//       CHECK:   %[[c0:.*]] = arith.constant 0 : index
-//       CHECK:   %[[dim:.*]] = memref.dim %[[arg]], %[[c0]]
-//       CHECK:   %[[alloc:.*]] = memref.alloc(%[[dim]]) : memref<?xf32>
-//       CHECK:   memref.copy %[[arg]], %[[alloc]]
-//       CHECK:   return %[[alloc]]
-func.func @dyn_layout_to_no_layout_cast(%m: memref<?xf32, strided<[1], offset: ?>>) -> memref<?xf32> {
-  %0 = bufferization.to_tensor %m : memref<?xf32, strided<[1], offset: ?>>
-  %1 = bufferization.to_memref %0 : memref<?xf32>
-  return %1 : memref<?xf32>
-}
-
-// -----
-
-// CHECK-LABEL: func @fancy_layout_to_no_layout_cast(
-//  CHECK-SAME:     %[[arg:.*]]: memref<?xf32, strided<[100], offset: ?>>)
-//       CHECK:   %[[c0:.*]] = arith.constant 0 : index
-//       CHECK:   %[[dim:.*]] = memref.dim %[[arg]], %[[c0]]
-//       CHECK:   %[[alloc:.*]] = memref.alloc(%[[dim]]) : memref<?xf32>
-//       CHECK:   memref.copy %[[arg]], %[[alloc]]
-//       CHECK:   return %[[alloc]]
-func.func @fancy_layout_to_no_layout_cast(%m: memref<?xf32, strided<[100], offset: ?>>) -> memref<?xf32> {
-  %0 = bufferization.to_tensor %m : memref<?xf32, strided<[100], offset: ?>>
-  %1 = bufferization.to_memref %0 : memref<?xf32>
-  return %1 : memref<?xf32>
-}
-
-// -----
-
-// CHECK-LABEL: func @static_layout_to_no_layout_cast(
-//  CHECK-SAME:     %[[arg:.*]]: memref<?xf32, strided<[1], offset: 25>>)
-//       CHECK:   %[[c0:.*]] = arith.constant 0 : index
-//       CHECK:   %[[dim:.*]] = memref.dim %[[arg]], %[[c0]]
-//       CHECK:   %[[alloc:.*]] = memref.alloc(%[[dim]]) : memref<?xf32>
-//       CHECK:   memref.copy %[[arg]], %[[alloc]]
-//       CHECK:   return %[[alloc]]
-func.func @static_layout_to_no_layout_cast(%m: memref<?xf32, strided<[1], offset: 25>>) -> memref<?xf32> {
-  %0 = bufferization.to_tensor %m : memref<?xf32, strided<[1], offset: 25>>
-  %1 = bufferization.to_memref %0 : memref<?xf32>
-  return %1 : memref<?xf32>
-}
-
-// -----
-
-// TODO: to_memref with layout maps not supported yet. This should fold to a
-// memref.cast.
-func.func @no_layout_to_dyn_layout_cast(%m: memref<?xf32>) -> memref<?xf32, strided<[1], offset: ?>> {
-  %0 = bufferization.to_tensor %m : memref<?xf32>
-  // expected-error @+1 {{failed to legalize unresolved materialization from ('memref<?xf32>') to ('memref<?xf32, strided<[1], offset: ?>>') that remained live after conversion}}
-  %1 = bufferization.to_memref %0 : memref<?xf32, strided<[1], offset: ?>>
-  // expected-note @below{{see existing live user here}}
-  return %1 : memref<?xf32, strided<[1], offset: ?>>
-}
-
-// -----
-
-func.func @illegal_unranked_to_rank(%m: memref<*xf32>) -> memref<?xf32> {
-  // expected-note @+1 {{prior use here}}
-  %0 = bufferization.to_tensor %m : memref<*xf32>
-  // expected-error @+1 {{expects different type than prior uses: 'tensor<?xf32>' vs 'tensor<*xf32>'}}
-  %1 = bufferization.to_memref %0 : memref<?xf32>
-  return %1 : memref<?xf32>
-}


