[Mlir-commits] [mlir] ad1ba42 - [mlir][linalg][bufferize] Allow unbufferizable ops in input

Matthias Springer llvmlistbot at llvm.org
Fri Dec 3 03:25:30 PST 2021


Author: Matthias Springer
Date: 2021-12-03T20:20:46+09:00
New Revision: ad1ba42f687fdb772e9bccf2011f52e11feacda5

URL: https://github.com/llvm/llvm-project/commit/ad1ba42f687fdb772e9bccf2011f52e11feacda5
DIFF: https://github.com/llvm/llvm-project/commit/ad1ba42f687fdb772e9bccf2011f52e11feacda5.diff

LOG: [mlir][linalg][bufferize] Allow unbufferizable ops in input

Allow ops that are not bufferizable in the input IR (deactivated by default).

bufferization::ToMemrefOp and bufferization::ToTensorOp are generated at the boundaries between bufferized and unbufferized code.
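For illustration, a minimal before/after sketch (the unregistered op and the
general shape are modeled on the new test file; the fully dynamic memref layout
maps in the bufferized types are elided for brevity):

  // Input:
  func @use_of_unknown_op(%t1: tensor<?xf32>) -> tensor<?xf32> {
    %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32>
    return %0 : tensor<?xf32>
  }

  // After -linalg-comprehensive-module-bufferize="allow-return-memref allow-unknown-ops":
  func @use_of_unknown_op(%m1: memref<?xf32>) -> memref<?xf32> {
    %t = bufferization.to_tensor %m1 : memref<?xf32>
    %0 = "test.dummy_op"(%t) : (tensor<?xf32>) -> tensor<?xf32>
    %m = bufferization.to_memref %0 : memref<?xf32>
    return %m : memref<?xf32>
  }

The unknown op keeps operating on tensors; the inserted casts bridge between the
bufferized surroundings and the unbufferized op.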

Differential Revision: https://reviews.llvm.org/D114669

Added: 
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize-partial.mlir

Modified: 
    mlir/include/mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h
    mlir/include/mlir/Dialect/Linalg/Passes.td
    mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp
    mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h
index 173126f5c7d4d..3b342e30dc890 100644
--- a/mlir/include/mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h
@@ -93,6 +93,11 @@ struct BufferizationOptions {
   /// Otherwise, a pass failure is triggered.
   bool allowReturnMemref = false;
 
+  /// Specifies whether ops that are not bufferizable are allowed in the
+  /// input. If so, bufferization.to_memref and bufferization.to_tensor ops
+  /// are inserted at the bufferization boundaries.
+  bool allowUnknownOps = false;
+
   /// Seed for the analysis fuzzer. If set to `0`, the fuzzer is deactivated.
   /// Should be used only with `testAnalysisOnly = true`.
   unsigned analysisFuzzerSeed = 0;
@@ -314,7 +319,7 @@ struct BufferizationState {
 
   /// Lookup the memref buffer that is associated to the given tensor value.
   /// Asserts if no buffer is associated.
-  Value lookupBuffer(Value tensor) const;
+  Value lookupBuffer(Value tensor);
 
   /// Lookup the value that is associated to the given value. Asserts if no
   /// value is associated.
@@ -436,7 +441,13 @@ struct AllocationHoistingBarrierOnly
     auto isaTensor = [](Type t) { return t.isa<TensorType>(); };
     if (any_of(op->getOperandTypes(), isaTensor) ||
         any_of(op->getResultTypes(), isaTensor))
-      return op->emitError() << "unsupported op with tensors";
+      if (!state.options.allowUnknownOps)
+        return op->emitError() << "unsupported op with tensors";
+
+    for (Region &region : op->getRegions())
+      if (failed(comprehensive_bufferize::bufferize(&region, state)))
+        return failure();
+
     return success();
   }
 

diff --git a/mlir/include/mlir/Dialect/Linalg/Passes.td b/mlir/include/mlir/Dialect/Linalg/Passes.td
index 060f46241d8b5..cfba6ac546a13 100644
--- a/mlir/include/mlir/Dialect/Linalg/Passes.td
+++ b/mlir/include/mlir/Dialect/Linalg/Passes.td
@@ -41,6 +41,9 @@ def LinalgComprehensiveModuleBufferize :
     Option<"allowReturnMemref", "allow-return-memref", "bool",
             /*default=*/"false",
            "Allows the return of memrefs (for testing purposes only)">,
+    Option<"allowUnknownOps", "allow-unknown-ops", "bool",
+           /*default=*/"false",
+           "Allows unknown (not bufferizable) ops in the input IR.">,
     Option<"useAlloca", "use-alloca", "bool",
            /*default=*/"false",
            "Use stack allocations for memrefs (for testing purposes only)">,

diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp
index c6ba42de66237..61c4f8ee4a05f 100644
--- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp
@@ -215,6 +215,14 @@ BufferizationAliasInfo::getAliases(Value v) const {
 // Helper functions for BufferizableOpInterface
 //===----------------------------------------------------------------------===//
 
+static void setInsertionPointAfter(OpBuilder &b, Value value) {
+  if (auto bbArg = value.dyn_cast<BlockArgument>()) {
+    b.setInsertionPointToStart(bbArg.getOwner());
+  } else {
+    b.setInsertionPointAfter(value.getDefiningOp());
+  }
+}
+
 /// Determine which OpOperand* will alias with `result` if the op is bufferized
 /// in place. Return an empty vector if the op is not bufferizable.
 SmallVector<OpOperand *>
@@ -378,7 +386,8 @@ Value mlir::linalg::comprehensive_bufferize::getResultBuffer(
   // TODO: Should be checking for "equivalent buffers" instead of
   // operator== here, but equivalent buffers for scf.if yield values are not
   // set up yet.
-  if (!llvm::all_of(aliasingOperands, [&](OpOperand *o) {
+  if (aliasingOperands.size() > 1 &&
+      !llvm::all_of(aliasingOperands, [&](OpOperand *o) {
         return state.lookupBuffer(o->get()) == operandBuffer;
       })) {
     op->emitError("result buffer is ambiguous");
@@ -395,11 +404,7 @@ Value mlir::linalg::comprehensive_bufferize::getResultBuffer(
     Location loc = op->getLoc();
     // Move insertion point right after `operandBuffer`. That is where the
     // allocation should be inserted (in the absence of allocation hoisting).
-    if (auto bbArg = operandBuffer.dyn_cast<BlockArgument>()) {
-      b.setInsertionPointToStart(bbArg.getOwner());
-    } else {
-      b.setInsertionPointAfter(operandBuffer.getDefiningOp());
-    }
+    setInsertionPointAfter(b, operandBuffer);
     // Allocate the result buffer.
     Value resultBuffer = state.createAllocDeallocFn(b, loc, operandBuffer);
     bool skipCopy = false;
@@ -471,12 +476,31 @@ mlir::linalg::comprehensive_bufferize::bufferize(Operation *op,
 
   // Bufferize using `BufferizableOpInterface`. Interface implementations are
   // responsible for bufferizing nested ops.
-  b.setInsertionPoint(op);
-  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
+  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op)) {
+    b.setInsertionPoint(op);
     return bufferizableOp.bufferize(b, state);
+  }
+
+  // `op` is an unbufferizable tensor op.
+  if (!state.options.allowUnknownOps)
+    return op->emitError() << "unsupported op with tensors";
+
+  // Replace all OpOperands with "to-tensor casted" bufferized values.
+  for (OpOperand &operand : op->getOpOperands()) {
+    if (operand.get().getType().isa<TensorType>() &&
+        state.isMapped(operand.get())) {
+      b.setInsertionPoint(op);
+      Value toTensorOp = b.create<bufferization::ToTensorOp>(
+          op->getLoc(), state.lookupBuffer(operand.get()));
+      operand.set(toTensorOp);
+    }
+  }
+
+  for (Region &region : op->getRegions())
+    if (failed(bufferize(&region, state)))
+      return failure();
 
-  // Emit error if tensor op is not bufferizable.
-  return op->emitError() << "unsupported op with tensors";
+  return success();
 }
 
 //===----------------------------------------------------------------------===//
@@ -636,22 +660,36 @@ void mlir::linalg::comprehensive_bufferize::BufferizationState::mapValue(
 
 /// Wrapper for better debugging.
 Value mlir::linalg::comprehensive_bufferize::BufferizationState::lookupBuffer(
-    Value tensor) const {
+    Value tensor) {
   // TODO: if key comes from bbArg, forward.
   assert(tensor.getType().isa<TensorType>() && "unexpected non-tensor type");
-  Value v = mapping.lookupOrNull(tensor);
+  Value buffer = mapping.lookupOrNull(tensor);
+
+  if (!buffer) {
+    if (options.allowUnknownOps) {
+      // `tensor` was not bufferized yet. This should never happen with
+      // bufferizable ops.
+      assert(!tensor.getDefiningOp<BufferizableOpInterface>() &&
+             "tensor is not mapped");
+      // Insert to_memref op.
+      OpBuilder b(tensor.getContext());
+      setInsertionPointAfter(b, tensor);
+      return b.create<bufferization::ToMemrefOp>(
+          tensor.getLoc(),
+          getDynamicMemRefType(tensor.getType().cast<RankedTensorType>()),
+          tensor);
+    }
 
-  if (!v) {
     // Dump tensor for easier debugging.
     tensor.dump();
     llvm_unreachable("tensor is not mapped");
     return Value();
   }
 
-  assert((v.getType().isa<MemRefType>() ||
-          v.getType().isa<UnrankedMemRefType>()) &&
+  assert((buffer.getType().isa<MemRefType>() ||
+          buffer.getType().isa<UnrankedMemRefType>()) &&
          "expected that tensor is mapped to memref");
-  return v;
+  return buffer;
 }
 
 Value mlir::linalg::comprehensive_bufferize::BufferizationState::lookupValue(

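Note on the lookupBuffer change above: when the queried tensor was produced by an
op that does not implement BufferizableOpInterface (and allowUnknownOps is set),
a bufferization.to_memref is now materialized right after that value instead of
asserting. A rough sketch for a 1-D tensor, where the result type is the fully
dynamic strided memref produced by getDynamicMemRefType (the exact layout map
shown here is an approximation):

  %0 = "test.dummy_op"(%t) : (tensor<?xf32>) -> tensor<?xf32>
  %m = bufferization.to_memref %0
      : memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>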
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
index 1910298213334..9da3ed883cfbe 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
@@ -86,6 +86,7 @@ void LinalgComprehensiveModuleBufferize::runOnOperation() {
   };
 
   options.allowReturnMemref = allowReturnMemref;
+  options.allowUnknownOps = allowUnknownOps;
   options.analysisFuzzerSeed = analysisFuzzerSeed;
   options.testAnalysisOnly = testAnalysisOnly;
 

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-partial.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-partial.mlir
new file mode 100644
index 0000000000000..6da6b2a514dc5
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-partial.mlir
@@ -0,0 +1,150 @@
+// RUN: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize="allow-return-memref allow-unknown-ops" -split-input-file | FileCheck %s
+
+// TODO: Bufferize result IR of bufferization.
+// TODO: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize="allow-return-memref allow-unknown-ops" -linalg-comprehensive-module-bufferize="allow-return-memref allow-unknown-ops" -split-input-file | FileCheck %s
+
+// Run fuzzer with different seeds.
+// RUN: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize="test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize="test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize="test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null
+
+// CHECK-LABEL: func @use_of_unknown_op_1(
+//  CHECK-SAME:     %[[m1:.*]]: memref<?xf32
+func @use_of_unknown_op_1(%t1: tensor<?xf32> {linalg.inplaceable = true})
+    -> vector<5xf32> {
+  // ToTensorOp is generated because the function is bufferized and has a
+  // memref block argument.
+  // CHECK: %[[m1_tensor:.*]] = bufferization.to_tensor %[[m1]]
+  // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[m1_tensor]])
+  %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32>
+
+  %idx = arith.constant 0 : index
+  %cst = arith.constant 0.0 : f32
+  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]]
+  // CHECK: vector.transfer_read %[[dummy_memref]]
+  %1 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32>
+  return %1 : vector<5xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @use_of_unknown_op_2(
+//  CHECK-SAME:     %[[m1:.*]]: memref<?xf32
+func @use_of_unknown_op_2(%t1: tensor<?xf32> {linalg.inplaceable = true})
+    -> tensor<?xf32> {
+  // CHECK: %[[m1_tensor:.*]] = bufferization.to_tensor %[[m1]]
+
+  // CHECK: %[[dummy1:.*]] = "test.dummy_op"(%[[m1_tensor]])
+  %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32>
+  // CHECK: %[[dummy2:.*]] = "test.another_dummy_op"(%[[dummy1]])
+  %1 = "test.another_dummy_op"(%0) : (tensor<?xf32>) -> tensor<?xf32>
+
+  // CHECK: %[[dummy2_memref:.*]] = bufferization.to_memref %[[dummy2]]
+  // CHECK: return %[[dummy2_memref]]
+  return %1 : tensor<?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @use_of_unknown_op_3(
+//  CHECK-SAME:     %[[m1:.*]]: memref<?xf32
+func @use_of_unknown_op_3(%t1: tensor<?xf32> {linalg.inplaceable = true})
+    -> (vector<5xf32>, vector<5xf32>) {
+  %idx = arith.constant 0 : index
+  %cst = arith.constant 0.0 : f32
+  // CHECK: %[[v1:.*]] = vector.transfer_read %[[m1]]
+  %1 = vector.transfer_read %t1[%idx], %cst : tensor<?xf32>, vector<5xf32>
+
+  // CHECK: %[[m1_tensor:.*]] = bufferization.to_tensor %[[m1]]
+  // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[m1_tensor]])
+  %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32>
+  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]]
+  // CHECK: %[[v2:.*]] = vector.transfer_read %[[dummy_memref]]
+  %2 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32>
+
+  // CHECK: return %[[v1]], %[[v2]]
+  return %1, %2 : vector<5xf32>, vector<5xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @use_of_unknown_op_4(
+//  CHECK-SAME:     %[[m1:.*]]: memref<?xf32
+func @use_of_unknown_op_4(%t1: tensor<?xf32> {linalg.inplaceable = true})
+    -> (vector<5xf32>, tensor<?xf32>) {
+  %idx = arith.constant 0 : index
+  %cst = arith.constant 0.0 : f32
+
+  // CHECK: %[[m1_tensor:.*]] = bufferization.to_tensor %[[m1]]
+  // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[m1_tensor]])
+  %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32>
+
+  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]]
+  // CHECK: %[[v1:.*]] = vector.transfer_read %[[dummy_memref]]
+  %1 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32>
+
+  // CHECK: %[[another_dummy:.*]] = "test.another_dummy_op"(%[[dummy]])
+  %2 = "test.another_dummy_op"(%0) : (tensor<?xf32>) -> tensor<?xf32>
+
+  // CHECK: %[[another_dummy_memref:.*]] = bufferization.to_memref %[[another_dummy]]
+  // CHECK: return %[[v1]], %[[another_dummy_memref]]
+  return %1, %2 : vector<5xf32>, tensor<?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @use_of_bufferizable_op_in_unbufferizable_op
+//  CHECK-SAME:     %[[m1:.*]]: memref<?xf32
+func @use_of_bufferizable_op_in_unbufferizable_op(
+    %t1: tensor<?xf32>, %o: index, %s: index) -> (tensor<?xf32>, tensor<?xf32>) {
+  // CHECK: %[[subview:.*]] = memref.subview %[[m1]]
+  %0 = tensor.extract_slice %t1[%o][%s][1] : tensor<?xf32> to tensor<?xf32>
+  // CHECK: %[[subview_tensor:.*]] = bufferization.to_tensor %[[subview]]
+  // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[subview_tensor]])
+  %1 = "test.dummy_op"(%0) : (tensor<?xf32>) -> tensor<?xf32>
+  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]]
+  // CHECK: return %[[subview]], %[[dummy_memref]]
+  return %0, %1 : tensor<?xf32>, tensor<?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @unused_unknown_op(
+//  CHECK-SAME:     %[[m1:.*]]: memref<?xf32
+func @unused_unknown_op(%t1 : tensor<?xf32>) -> vector<5xf32> {
+  %idx = arith.constant 0 : index
+  %cst = arith.constant 0.0 : f32
+  // CHECK: vector.transfer_read %[[m1]]
+  %1 = vector.transfer_read %t1[%idx], %cst : tensor<?xf32>, vector<5xf32>
+
+  // ToTensorOp is inserted to pass the bufferized function bbArg into the op.
+  // CHECK: %[[m1_tensor:.*]] = bufferization.to_tensor %[[m1]]
+  // CHECK: "test.dummy_op"(%[[m1_tensor]])
+  "test.dummy_op"(%t1) : (tensor<?xf32>) -> ()
+
+  return %1 : vector<5xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @unknown_op_not_writable
+//  CHECK-SAME:     %[[m1:.*]]: memref<?xf32
+func @unknown_op_not_writable(
+    %t1 : tensor<?xf32>, %v :  vector<5xf32>, %idx : index) -> tensor<?xf32> {
+  // CHECK: %[[m1_tensor:.*]] = bufferization.to_tensor %[[m1]]
+  // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[m1_tensor]])
+  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]]
+  %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> (tensor<?xf32>)
+
+  // The result of an unknown op is not writable. Always generate a copy.
+  // Note: This copy is essential for partial bufferization. Otherwise, we
+  // could be introducing a RaW conflict.
+  // CHECK: %[[dim:.*]] = tensor.dim %[[dummy]]
+  // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
+  // CHECK: linalg.copy(%[[dummy_memref]], %[[alloc]])
+  // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
+  %1 = vector.transfer_write %v, %0[%idx] : vector<5xf32>, tensor<?xf32>
+
+  // CHECK: return %[[alloc]]
+  return %1 : tensor<?xf32>
+}


More information about the Mlir-commits mailing list