[Mlir-commits] [mlir] [mlir][Bufferization] Add support for controlled bufferization of alloc_tensor (PR #70957)
Nicolas Vasilache
llvmlistbot at llvm.org
Wed Nov 1 09:46:36 PDT 2023
https://github.com/nicolasvasilache created https://github.com/llvm/llvm-project/pull/70957
This revision adds support to `transform.structured.bufferize_to_allocation` to bufferize `bufferization.alloc_tensor()` ops.
This is useful as a means to control the bufferization of `tensor.empty` ops that have been previously converted via `bufferization.empty_tensor_to_alloc_tensor`.
>From 16e7b59eca70d7e76345d7ba9158645e8a6fdb1b Mon Sep 17 00:00:00 2001
From: Nicolas Vasilache <nicolas.vasilache at gmail.com>
Date: Wed, 1 Nov 2023 14:43:45 +0000
Subject: [PATCH 1/2] [mlir][Vector] Add initial support for inlining in the
presence of vector ops
---
mlir/lib/Dialect/Vector/IR/VectorOps.cpp | 16 ++++++++++++++++
mlir/test/Dialect/Vector/inlining.mlir | 14 ++++++++++++++
2 files changed, 30 insertions(+)
create mode 100644 mlir/test/Dialect/Vector/inlining.mlir
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index 4e34caa6d8aaba8..60416f550ee619d 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -33,6 +33,7 @@
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "mlir/Support/LLVM.h"
+#include "mlir/Transforms/InliningUtils.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
@@ -348,6 +349,19 @@ struct BitmaskEnumStorage : public AttributeStorage {
// VectorDialect
//===----------------------------------------------------------------------===//
+namespace {
+/// This class defines the interface for handling inlining with vector dialect
+/// operations.
+struct VectorInlinerInterface : public DialectInlinerInterface {
+ using DialectInlinerInterface::DialectInlinerInterface;
+
+ /// All vector dialect ops can be inlined.
+ bool isLegalToInline(Operation *, Region *, bool, IRMapping &) const final {
+ return true;
+ }
+};
+} // namespace
+
void VectorDialect::initialize() {
addAttributes<
#define GET_ATTRDEF_LIST
@@ -358,6 +372,8 @@ void VectorDialect::initialize() {
#define GET_OP_LIST
#include "mlir/Dialect/Vector/IR/VectorOps.cpp.inc"
>();
+
+ addInterfaces<VectorInlinerInterface>();
}
/// Materialize a single constant operation from a given attribute value with
diff --git a/mlir/test/Dialect/Vector/inlining.mlir b/mlir/test/Dialect/Vector/inlining.mlir
new file mode 100644
index 000000000000000..053a115613ff641
--- /dev/null
+++ b/mlir/test/Dialect/Vector/inlining.mlir
@@ -0,0 +1,14 @@
+// RUN: mlir-opt %s -inline | FileCheck %s
+
+func.func @inner_func_inlinable(%v: f32) -> vector<4xf32> {
+ %1 = vector.broadcast %v : f32 to vector<4xf32>
+ return %1 : vector<4xf32>
+}
+
+// CHECK-LABEL: func.func @test_inline(
+// CHECK-NOT: func.call
+// CHECK-NEXT: vector.broadcast
+func.func @test_inline(%v: f32) -> vector<4xf32> {
+ %0 = call @inner_func_inlinable(%v) : (f32) -> vector<4xf32>
+ return %0 : vector<4xf32>
+}
>From ed0c0ce145b40d44fdf5d0119bca9a11857d5686 Mon Sep 17 00:00:00 2001
From: Nicolas Vasilache <nicolas.vasilache at gmail.com>
Date: Wed, 1 Nov 2023 15:29:02 +0000
Subject: [PATCH 2/2] [mlir][Bufferization] Add support for controlled
bufferization of alloc_tensor
This revision adds support to `transform.structured.bufferize_to_allocation` to
bufferize `bufferization.alloc_tensor()` ops.
This is useful as a means to control the bufferization of `tensor.empty`
ops that have been previously converted via `bufferization.empty_tensor_to_alloc_tensor`.
---
.../Dialect/Linalg/Transforms/Transforms.h | 14 +++++++++++
.../Transforms/ConvertToDestinationStyle.cpp | 23 +++++++++++++++++++
.../Transforms/transform-ops.mlir | 23 +++++++++++++++++++
3 files changed, 60 insertions(+)
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 122f73562852101..abd996bdbaf852b 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -12,6 +12,7 @@
#include <utility>
#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
+#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/Utils/Utils.h"
@@ -28,6 +29,7 @@
namespace mlir {
namespace bufferization {
+class AllocTensorOp;
class OneShotAnalysisState;
} // namespace bufferization
@@ -110,6 +112,18 @@ Value bufferizeToAllocation(RewriterBase &rewriter,
vector::MaskOp maskOp, Attribute memorySpace = {},
Operation *insertionPoint = nullptr);
+/// Materialize a buffer allocation for the given bufferization.alloc_tensor op
+/// and lower the op to memref.alloc + memref.tensor_store.
+///
+/// In addition to rewriting the IR, this function returns the newly allocated
+/// buffer. The `insertionPoint` parameter can be used to specify a custom
+/// insertion point for the buffer allocation.
+Value bufferizeToAllocation(RewriterBase &rewriter,
+ const BufferizeToAllocationOptions &options,
+ bufferization::AllocTensorOp allocTensorOp,
+ Attribute memorySpace = {},
+ Operation *insertionPoint = nullptr);
+
/// Bufferize the given op with tensor semantics and materialize the result in
/// a newly allocated buffer.
///
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
index f7340844f7e1977..311540fde512b9b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
@@ -317,6 +317,27 @@ Value linalg::bufferizeToAllocation(
return alloc;
}
+Value linalg::bufferizeToAllocation(
+ RewriterBase &rewriter, const linalg::BufferizeToAllocationOptions &options,
+ bufferization::AllocTensorOp allocTensorOp, Attribute memorySpace,
+ Operation *insertionPoint) {
+ Location loc = allocTensorOp.getLoc();
+ OpBuilder::InsertionGuard g(rewriter);
+ rewriter.setInsertionPoint(insertionPoint ? insertionPoint : allocTensorOp);
+ bufferization::BufferizationOptions bufferizationOptions;
+
+ // Create buffer allocation.
+ Value alloc = createAllocationForTensor(
+ rewriter, loc, allocTensorOp.getResult(), options, memorySpace);
+
+ // Create bufferization.to_tensor with "restrict" and "writable". The returned
+ // tensor is a new buffer allocation, so it does not alias with any buffer.
+ Value toTensorOp = rewriter.create<bufferization::ToTensorOp>(
+ loc, alloc, /*restrict=*/true, /*writable=*/true);
+ rewriter.replaceOp(allocTensorOp, toTensorOp);
+ return alloc;
+}
+
/// Lower tensor.from_elements to a sequence of chained tensor.insert.
FailureOr<Operation *> mlir::linalg::rewriteInDestinationPassingStyle(
RewriterBase &rewriter, tensor::FromElementsOp fromElementsOp) {
@@ -454,6 +475,8 @@ Value linalg::bufferizeToAllocation(
return bufferizeToAllocation(rewriter, options, padOp, memorySpace);
if (auto maskOp = dyn_cast<vector::MaskOp>(op))
return bufferizeToAllocation(rewriter, options, maskOp, memorySpace);
+ if (auto allocTensorOp = dyn_cast<bufferization::AllocTensorOp>(op))
+ return bufferizeToAllocation(rewriter, options, allocTensorOp, memorySpace);
// Only bufferizable ops are supported.
auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
diff --git a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
index 8d52d9900a793e2..3c50a9e72d9d9b3 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
@@ -215,3 +215,26 @@ func.func @buffer_loop_hoisting(%lb: index, %ub: index, %step: index, %f: f32, %
}
return
}
+
+// -----
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %alloc_tensor = transform.structured.match ops{["bufferization.alloc_tensor"]} in %arg1
+ : (!transform.any_op) -> !transform.op<"bufferization.alloc_tensor">
+ %2, %new = transform.structured.bufferize_to_allocation %alloc_tensor
+ {alloc_op = "memref.alloca"}
+ : !transform.op<"bufferization.alloc_tensor">
+ transform.yield
+ }
+}
+
+// Expect `bufferization.bufferize_to_allocation` to create an alloc.
+// CHECK-LABEL: func.func @empty_to_tensor_alloc()
+func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
+ // CHECK-NEXT: %[[alloca:.*]] = memref.alloca() : memref<2x2xf32>
+ // CHECK-NEXT: %[[tensor:.*]] = bufferization.to_tensor %[[alloca]] restrict writable : memref<2x2xf32>
+ // CHECK-NEXT: return %[[tensor]] : tensor<2x2xf32>
+ %0 = bufferization.alloc_tensor() : tensor<2x2xf32>
+ return %0 : tensor<2x2xf32>
+}
More information about the Mlir-commits
mailing list