[Mlir-commits] [mlir] d31a6df - [mlir][bufferization] Add bufferization.copy_tensor op

Matthias Springer llvmlistbot at llvm.org
Tue Jun 27 05:58:31 PDT 2023


Author: Matthias Springer
Date: 2023-06-27T14:57:13+02:00
New Revision: d31a6dfbc0a9b259e365b3748bf142884b6fabe5

URL: https://github.com/llvm/llvm-project/commit/d31a6dfbc0a9b259e365b3748bf142884b6fabe5
DIFF: https://github.com/llvm/llvm-project/commit/d31a6dfbc0a9b259e365b3748bf142884b6fabe5.diff

LOG: [mlir][bufferization] Add bufferization.copy_tensor op

This operation is a "copy" operation on tensors: it copies the contents of the source tensor into the destination tensor and is guaranteed to bufferize to a memcpy. This is different from `tensor.insert_slice`, which may fold away and leave no copy behind.
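
For illustration, a minimal sketch of that difference (function and value names are made up; the `copy_tensor` syntax follows the tests added in this patch). A full-tensor `tensor.insert_slice` may canonicalize to just its source value, whereas `bufferization.copy_tensor` survives until bufferization and materializes an actual copy:

```
func.func @copy_vs_insert_slice(%src: tensor<5xf32>, %dest: tensor<5xf32>)
    -> (tensor<5xf32>, tensor<5xf32>) {
  // May fold away: a full overwrite of %dest is simply %src.
  %a = tensor.insert_slice %src into %dest[0] [5] [1]
      : tensor<5xf32> into tensor<5xf32>
  // Guaranteed to bufferize to a memcpy; never folds away.
  %b = bufferization.copy_tensor %src, %dest : tensor<5xf32>
  return %a, %b : tensor<5xf32>, tensor<5xf32>
}
```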

Note: There is a symmetry between certain tensor, bufferization and memref ops:
* `tensor.empty`, `bufferization.alloc_tensor`, `memref.alloc`
* (none), `bufferization.dealloc_tensor`, `memref.dealloc`
* `tensor.insert_slice`, `bufferization.copy_tensor`, `memref.copy`

Tensor dialect ops can generally canonicalize/fold away, while bufferization dialect ops are meant to be used when a certain side effect (allocation, deallocation, copy) is expected to materialize; they therefore do not fold away (a sketch of this correspondence for the copy case follows).
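
As a rough sketch of that correspondence (SSA names are invented; the bufferized form mirrors the CHECK lines of the One-Shot Bufferize test added below):

```
// Tensor level: the copy is expressed as an op with a guaranteed effect.
%dest = bufferization.alloc_tensor() : tensor<5xf32>
%0 = bufferization.copy_tensor %src, %dest : tensor<5xf32>

// After One-Shot Bufferize (roughly): the ops map onto their memref
// counterparts from the list above.
%m = bufferization.to_memref %src : memref<5xf32>
%alloc = memref.alloc() : memref<5xf32>
memref.copy %m, %alloc : memref<5xf32> to memref<5xf32>
```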

Differential Revision: https://reviews.llvm.org/D153552

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
    mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
    mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
    mlir/test/Dialect/Bufferization/invalid.mlir
    mlir/test/Dialect/Bufferization/ops.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index 726b6b525dc20..a1e2f8114269f 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -76,6 +76,9 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
     %c = bufferization.alloc_tensor(%d1, %d2) size_hint = %noe
       : tensor<?x?xf32, #SparseMatrix>
     ```
+
+    Note: An `alloc_tensor` with a `copy` can also be expressed as an
+    `alloc_tensor` without `copy`, followed by a `copy_tensor`.
   }];
 
   let arguments = (ins Variadic<Index>:$dynamic_sizes,
@@ -202,6 +205,46 @@ def Bufferization_CloneOp : Bufferization_Op<"clone", [
   let hasCanonicalizer = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// CopyTensorOp
+//===----------------------------------------------------------------------===//
+
+def Bufferization_CopyTensorOp : Bufferization_Op<"copy_tensor",
+    [BufferizableOpInterface, SameOperandsAndResultType,
+     DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
+  let summary = "copy a tensor";
+
+  let description = [{
+    Copy the contents of the source tensor into the destination tensor. This
+    operation is guaranteed to bufferize to a memory copy.
+  }];
+
+  let arguments = (ins AnyTensor:$source,
+                       AnyTensor:$dest);
+
+  let results = (outs AnyTensor:$result);
+
+  let extraClassDeclaration = [{
+    LogicalResult bufferize(RewriterBase &rewriter,
+                            const BufferizationOptions &options);
+
+    bool bufferizesToMemoryRead(OpOperand &opOperand,
+                                const AnalysisState &state);
+
+    bool bufferizesToMemoryWrite(OpOperand &opOperand,
+                                 const AnalysisState &state);
+
+    AliasingOpResultList getAliasingOpResults(
+        OpOperand &opOperand, const AnalysisState &state);
+
+    RankedTensorType getType() {
+      return ::llvm::cast<RankedTensorType>(getResult().getType());
+    }
+  }];
+
+  let assemblyFormat = "$source `,` $dest attr-dict `:` type($source)";
+}
+
 //===----------------------------------------------------------------------===//
 // DeallocTensorOp
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index fdd6f3de6464a..4eb8231e5e2bd 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -444,6 +444,49 @@ Value AllocTensorOp::getDynamicSize(OpBuilder &b, unsigned idx) {
   return getOperand(getIndexOfDynamicSize(idx));
 }
 
+//===----------------------------------------------------------------------===//
+// CopyTensorOp
+//===----------------------------------------------------------------------===//
+
+bool CopyTensorOp::bufferizesToMemoryRead(OpOperand &opOperand,
+                                          const AnalysisState &state) {
+  if (&opOperand == &getOperation()->getOpOperand(0) /*source*/)
+    return true;
+  return false;
+}
+
+bool CopyTensorOp::bufferizesToMemoryWrite(OpOperand &opOperand,
+                                           const AnalysisState &state) {
+  if (&opOperand == &getOperation()->getOpOperand(1) /*dest*/)
+    return true;
+  return false;
+}
+
+AliasingOpResultList
+CopyTensorOp::getAliasingOpResults(OpOperand &opOperand,
+                                   const AnalysisState &state) {
+  if (&opOperand == &getOperation()->getOpOperand(1) /*dest*/)
+    return {{getOperation()->getResult(0), BufferRelation::Equivalent}};
+  return {};
+}
+
+LogicalResult CopyTensorOp::bufferize(RewriterBase &rewriter,
+                                      const BufferizationOptions &options) {
+  FailureOr<Value> buffer = getBuffer(rewriter, getDest(), options);
+  if (failed(buffer))
+    return failure();
+  rewriter.create<memref::TensorStoreOp>(getLoc(), getSource(), *buffer);
+  replaceOpWithBufferizedValues(rewriter, getOperation(), *buffer);
+  return success();
+}
+
+LogicalResult CopyTensorOp::reifyResultShapes(
+    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
+  reifiedReturnShapes.resize(1, SmallVector<OpFoldResult>(getType().getRank()));
+  reifiedReturnShapes[0] = tensor::getMixedSizes(builder, getLoc(), getDest());
+  return success();
+}
+
 //===----------------------------------------------------------------------===//
 // CloneOp
 //===----------------------------------------------------------------------===//

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
index acefa14db487f..ace3cf1044556 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
@@ -208,3 +208,19 @@ func.func @from_unranked_to_unranked(%arg0: tensor<*xi32>) -> tensor<*xi32> {
   %0 = tensor.cast %arg0 : tensor<*xi32> to tensor<*xi32>
   return %0 : tensor<*xi32>
 }
+
+// -----
+
+// CHECK-LABEL: func @tensor_copy(
+//  CHECK-SAME:     %[[arg0:.*]]: tensor<5xf32>)
+func.func @tensor_copy(%arg0: tensor<5xf32>) -> tensor<5xf32> {
+  // CHECK: %[[m:.*]] = bufferization.to_memref %[[arg0]]
+  // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32>
+  // CHECK: memref.copy %[[m]], %[[alloc]]
+  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
+  // CHECK: memref.dealloc %[[alloc]]
+  // CHECK: return %[[r]]
+  %dest = bufferization.alloc_tensor() : tensor<5xf32>
+  %0 = bufferization.copy_tensor %arg0, %dest : tensor<5xf32>
+  return %0 : tensor<5xf32>
+}

diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir
index 364464226cb3e..09f3ed40aa723 100644
--- a/mlir/test/Dialect/Bufferization/invalid.mlir
+++ b/mlir/test/Dialect/Bufferization/invalid.mlir
@@ -95,3 +95,11 @@ func.func @invalid_writable_on_op() {
   // expected-error @+1{{attribute '"bufferization.writable"' not supported as an op attribute by the bufferization dialect}}
   arith.constant {bufferization.writable = true} 0  : index
 }
+
+// -----
+
+// expected-note @below{{prior use here}}
+func.func @invalid_tensor_copy(%arg0: tensor<?xf32>, %arg1: tensor<5xf32>) {
+  // expected-error @below{{expects different type than prior uses: 'tensor<?xf32>' vs 'tensor<5xf32>'}}
+  bufferization.copy_tensor %arg0, %arg1 : tensor<?xf32>
+}

diff --git a/mlir/test/Dialect/Bufferization/ops.mlir b/mlir/test/Dialect/Bufferization/ops.mlir
index ddb597a334d81..621568b654f13 100644
--- a/mlir/test/Dialect/Bufferization/ops.mlir
+++ b/mlir/test/Dialect/Bufferization/ops.mlir
@@ -57,3 +57,11 @@ func.func @test_dealloc_tensor_op(%arg0: tensor<4xi32>) {
   bufferization.dealloc_tensor %arg0 : tensor<4xi32>
   return
 }
+
+// CHECK-LABEL: func @test_copy_tensor_op
+func.func @test_copy_tensor_op(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>)
+    -> tensor<?xf32> {
+  // CHECK: bufferization.copy_tensor {{.*}} : tensor<?xf32>
+  %1 = bufferization.copy_tensor %arg0, %arg1 : tensor<?xf32>
+  return %1 : tensor<?xf32>
+}

