[Mlir-commits] [mlir] 1a5aa77 - [mlir][linalg] BufferizeToAllocationOp: Add option to specify custom alloc op

Matthias Springer llvmlistbot at llvm.org
Fri Jul 14 04:40:14 PDT 2023


Author: Matthias Springer
Date: 2023-07-14T13:39:05+02:00
New Revision: 1a5aa77f3073446d5ff3d11eaab516f5943b984d

URL: https://github.com/llvm/llvm-project/commit/1a5aa77f3073446d5ff3d11eaab516f5943b984d
DIFF: https://github.com/llvm/llvm-project/commit/1a5aa77f3073446d5ff3d11eaab516f5943b984d.diff

LOG: [mlir][linalg] BufferizeToAllocationOp: Add option to specify custom alloc op

Supported ops are "memref.alloc" and "memref.alloca".

Differential Revision: https://reviews.llvm.org/D155282

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
    mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
    mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
    mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 7a2bc02451dec9..f1510f63abdbcd 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -128,6 +128,11 @@ def BufferizeToAllocationOp : Op<Transform_Dialect,
     a fully dynamic layout is assumed for best compatibility. Users should use
     "memref.tensor_store" when possible.
 
+    "memref.alloc" is used for new buffer allocations. The buffer is deallocated
+    at the end of the block. Custom allocation ops can be specified via
+    `alloc_op`. Currently supported are "memref.alloc" and "memref.alloca". In
+    case of a "memref.alloca", the buffer is not deallocated.
+
     #### Return modes
 
     This operation consumes the `target` handle and produces the
@@ -137,7 +142,9 @@ def BufferizeToAllocationOp : Op<Transform_Dialect,
   let arguments = (ins TransformHandleTypeInterface:$target,
                        OptionalAttr<AnyAttr>:$memory_space,
                        DefaultValuedAttr<StrAttr, "\"memref.tensor_store\"">:
-                           $memcpy_op);
+                           $memcpy_op,
+                       DefaultValuedAttr<StrAttr, "\"memref.alloc\"">:
+                           $alloc_op);
   let hasVerifier = 1;
   let results = (outs Transform_AnyValue:$allocated_buffer,
                       Transform_AnyOpType:$new_ops);

diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 3491eebc84d694..a78dc1e1e571bc 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -47,8 +47,10 @@ std::optional<vector::CombiningKind> getCombinerOpKind(Operation *combinerOp);
 //===----------------------------------------------------------------------===//
 
 struct BufferizeToAllocationOptions {
-  enum class MemcpyOp { MemrefTensorStore = 0, MemrefCopy = 1, LinalgCopy = 2 };
+  enum class AllocOp { MemrefAlloc = 0, MemrefAlloca = 1 };
+  AllocOp allocOp = AllocOp::MemrefAlloc;
 
+  enum class MemcpyOp { MemrefTensorStore = 0, MemrefCopy = 1, LinalgCopy = 2 };
   MemcpyOp memcpyOp = MemcpyOp::MemrefTensorStore;
 };
 

diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 31fdca7affbcc6..a51050b742bfe0 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -248,6 +248,15 @@ DiagnosedSilenceableFailure transform::BufferizeToAllocationOp::apply(
   } else {
     llvm_unreachable("invalid memcpy op");
   }
+  if (getAllocOp() == "memref.alloc") {
+    options.allocOp =
+        linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloc;
+  } else if (getAllocOp() == "memref.alloca") {
+    options.allocOp =
+        linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloca;
+  } else {
+    llvm_unreachable("invalid alloc op");
+  }
 
   // Bufferize ops.
   Attribute memorySpace =
@@ -283,6 +292,8 @@ LogicalResult transform::BufferizeToAllocationOp::verify() {
   if (getMemcpyOp() != "memref.tensor_store" &&
       getMemcpyOp() != "memref.copy" && getMemcpyOp() != "linalg.copy")
     return emitOpError() << "unsupported memcpy op";
+  if (getAllocOp() != "memref.alloc" && getAllocOp() != "memref.alloca")
+    return emitOpError() << "unsupported alloc op";
   return success();
 }
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
index d75891af7e45d0..369ff8d3ceaf7e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
@@ -185,9 +185,10 @@ static SmallVector<Value> reifyOrComputeDynamicSizes(OpBuilder &b,
   return dynSizes;
 }
 
-static Value createAllocationForTensor(RewriterBase &rewriter, Location loc,
-                                       Value value,
-                                       Attribute memorySpace = {}) {
+static Value
+createAllocationForTensor(RewriterBase &rewriter, Location loc, Value value,
+                          const linalg::BufferizeToAllocationOptions &options,
+                          Attribute memorySpace = {}) {
   OpBuilder::InsertionGuard g(rewriter);
   auto tensorType = cast<RankedTensorType>(value.getType());
 
@@ -196,11 +197,19 @@ static Value createAllocationForTensor(RewriterBase &rewriter, Location loc,
       cast<MemRefType>(bufferization::getMemRefTypeWithStaticIdentityLayout(
           tensorType, memorySpace));
   SmallVector<Value> dynamicSizes = reifyOrComputeDynamicSizes(rewriter, value);
-  Value alloc = rewriter.create<memref::AllocOp>(loc, memrefType, dynamicSizes);
 
-  // Place deallocation at the end of the block.
-  rewriter.setInsertionPoint(rewriter.getInsertionBlock()->getTerminator());
-  rewriter.create<memref::DeallocOp>(loc, alloc);
+  Value alloc;
+  if (options.allocOp ==
+      linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloc) {
+    alloc = rewriter.create<memref::AllocOp>(loc, memrefType, dynamicSizes);
+    // Place deallocation at the end of the block.
+    rewriter.setInsertionPoint(rewriter.getInsertionBlock()->getTerminator());
+    rewriter.create<memref::DeallocOp>(loc, alloc);
+  } else if (options.allocOp ==
+             linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloca) {
+    alloc = rewriter.create<memref::AllocaOp>(loc, memrefType, dynamicSizes);
+    // No dealloc is needed.
+  }
 
   return alloc;
 }
@@ -213,8 +222,8 @@ Value linalg::bufferizeToAllocation(
   Location loc = padOp.getLoc();
 
   // Create buffer allocation.
-  Value alloc =
-      createAllocationForTensor(rewriter, loc, padOp.getResult(), memorySpace);
+  Value alloc = createAllocationForTensor(rewriter, loc, padOp.getResult(),
+                                          options, memorySpace);
   rewriter.setInsertionPoint(padOp);
 
   if (!padOp.hasZeroLowPad() || !padOp.hasZeroHighPad()) {
@@ -491,8 +500,8 @@ Value linalg::bufferizeToAllocation(
   rewriter.setInsertionPoint(insertionPoint ? insertionPoint : op);
   SmallVector<Value> allocs;
   for (OpOperand *operand : outOfPlaceOperands) {
-    Value alloc = createAllocationForTensor(rewriter, op->getLoc(),
-                                            operand->get(), memorySpace);
+    Value alloc = createAllocationForTensor(
+        rewriter, op->getLoc(), operand->get(), options, memorySpace);
     allocs.push_back(alloc);
     if (!state.findDefinitions(operand->get()).empty()) {
       // Initialize buffer with a copy of the operand data. Not needed if the

diff --git a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir
index dcac1f77a8b4fc..36f76d3785d6ec 100644
--- a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir
@@ -50,6 +50,7 @@ transform.sequence failures(propagate) {
 // CHECK-LABEL: func @tensor_pad_constant_with_custom_copy(
 //   CHECK-NOT:   memref.tensor_store
 //   CHECK-NOT:   memref.copy
+//       CHECK:   memref.alloca
 //       CHECK:   linalg.copy
 func.func @tensor_pad_constant_with_custom_copy(
     %t: tensor<?x10xindex>, %l2: index, %h1: index, %h2: index)
@@ -66,7 +67,7 @@ func.func @tensor_pad_constant_with_custom_copy(
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 3, memcpy_op = "linalg.copy"}: !transform.any_op
+  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 3, alloc_op = "memref.alloca", memcpy_op = "linalg.copy"}: !transform.any_op
 
   // Ensure that one linalg.fill was generated.
   %fill_op = transform.select "linalg.fill" in %new : (!transform.any_op) -> !transform.any_op
@@ -78,6 +79,11 @@ transform.sequence failures(propagate) {
   // expected-remark @below{{1}}
   test_print_number_of_associated_payload_ir_ops %linalg_copy : !transform.any_op
 
+  // Ensure that one memref.alloca was generated.
+  %alloca = transform.select "memref.alloca" in %new : (!transform.any_op) -> !transform.any_op
+  // expected-remark @below{{1}}
+  test_print_number_of_associated_payload_ir_ops %alloca : !transform.any_op
+
   // Make sure that One-Shot Bufferize can bufferize the rest.
   %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
 }


        


More information about the Mlir-commits mailing list