[Mlir-commits] [mlir] [mlir][bufferization][NFC] Rename to_memref to to_buffer (PR #137180)
llvmlistbot at llvm.org
llvmlistbot at llvm.org
Thu Apr 24 06:44:29 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-mlir-bufferization
Author: Andrei Golubev (andrey-golubev)
<details>
<summary>Changes</summary>
As part of the work on transitioning the bufferization dialect, ops, and associated logic to operate on the newly added type interfaces (see 00eaff3e9c897c263a879416d0f151d7ca7eeaff), rename `bufferization.to_memref` to `bufferization.to_buffer` to highlight the generic nature of the op.
The bufferization process produces buffers, whereas "memref" is a builtin type rather than a generic term.
The current API is preserved for now (`to_buffer` still produces a memref), however, since the new type interfaces are not in use yet.
---
Patch is 287.88 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/137180.diff
85 Files Affected:
- (modified) mlir/docs/Bufferization.md (+7-7)
- (modified) mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h (+2-2)
- (modified) mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h (+4-4)
- (modified) mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td (+6-6)
- (modified) mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h (+1-1)
- (modified) mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td (+3-3)
- (modified) mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td (+8-8)
- (modified) mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp (+1-1)
- (modified) mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp (+8-8)
- (modified) mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp (+44-44)
- (modified) mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp (+19-19)
- (modified) mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp (+3-3)
- (modified) mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp (+1-1)
- (modified) mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp (+4-4)
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp (+1-1)
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp (+1-1)
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp (+1-1)
- (modified) mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp (+3-3)
- (modified) mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir (+1-1)
- (modified) mlir/test/Dialect/Affine/loop-fusion-4.mlir (+1-1)
- (modified) mlir/test/Dialect/Arith/bufferize.mlir (+3-3)
- (modified) mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-other.mlir (+2-2)
- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir (+2-2)
- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir (+10-10)
- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir (+6-6)
- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir (+11-11)
- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir (+8-8)
- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-force-copy-before-write.mlir (+6-6)
- (modified) mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir (+5-5)
- (modified) mlir/test/Dialect/Bufferization/Transforms/tensorlike-bufferlike.mlir (+2-2)
- (modified) mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir (+3-3)
- (modified) mlir/test/Dialect/Bufferization/canonicalize.mlir (+16-16)
- (modified) mlir/test/Dialect/Bufferization/ops.mlir (+4-4)
- (modified) mlir/test/Dialect/ControlFlow/one-shot-bufferize.mlir (+2-2)
- (modified) mlir/test/Dialect/Linalg/bufferize.mlir (+7-7)
- (modified) mlir/test/Dialect/Linalg/hoisting.mlir (+2-2)
- (modified) mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir (+2-2)
- (modified) mlir/test/Dialect/MemRef/normalize-memrefs.mlir (+1-1)
- (modified) mlir/test/Dialect/SCF/bufferize.mlir (+6-6)
- (modified) mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir (+4-4)
- (modified) mlir/test/Dialect/Shape/bufferize.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/GPU/gpu_matmul24_lib.mlir (+3-3)
- (modified) mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/constant_index_map.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/dense.mlir (+3-3)
- (modified) mlir/test/Dialect/SparseTensor/fuse_sparse_pad_with_consumer.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sorted_coo.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/sparse_1d.mlir (+30-30)
- (modified) mlir/test/Dialect/SparseTensor/sparse_2d.mlir (+39-39)
- (modified) mlir/test/Dialect/SparseTensor/sparse_3d.mlir (+41-41)
- (modified) mlir/test/Dialect/SparseTensor/sparse_affine.mlir (+8-8)
- (modified) mlir/test/Dialect/SparseTensor/sparse_batch.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir (+11-11)
- (modified) mlir/test/Dialect/SparseTensor/sparse_fusion.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir (+17-17)
- (modified) mlir/test/Dialect/SparseTensor/sparse_kernels.mlir (+9-9)
- (modified) mlir/test/Dialect/SparseTensor/sparse_kernels_to_iterator.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_lower.mlir (+4-4)
- (modified) mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir (+4-4)
- (modified) mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir (+4-4)
- (modified) mlir/test/Dialect/SparseTensor/sparse_nd.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir (+3-3)
- (modified) mlir/test/Dialect/SparseTensor/sparse_pack.mlir (+6-6)
- (modified) mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/sparse_perm.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/sparse_scalars.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir (+5-5)
- (modified) mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/spy_sddmm.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/spy_sddmm_bsr.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/unused-tensor.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir (+14-14)
- (modified) mlir/test/Dialect/Tensor/bufferize.mlir (+21-21)
- (modified) mlir/test/Dialect/Vector/bufferize.mlir (+3-3)
- (modified) mlir/test/Integration/Dialect/Tosa/CPU/test-maxpool-dynamic.mlir (+2-2)
- (modified) mlir/test/Integration/Dialect/Vector/CPU/AMX/mulf-full.mlir (+2-2)
- (modified) mlir/test/Integration/Dialect/Vector/CPU/AMX/muli-full.mlir (+2-2)
- (modified) mlir/utils/tree-sitter-mlir/dialect/bufferization.js (+20-25)
- (modified) mlir/utils/tree-sitter-mlir/queries/highlights.scm (+1-1)
``````````diff
diff --git a/mlir/docs/Bufferization.md b/mlir/docs/Bufferization.md
index 02cfee5f2b8dc..e04934a120a00 100644
--- a/mlir/docs/Bufferization.md
+++ b/mlir/docs/Bufferization.md
@@ -202,13 +202,13 @@ e.g.:
%2 = "my_dialect.yet_another_op"(%0) : (tensor<?xf32>) -> (tensor<?xf32>)
```
-## Tensor / MemRef Boundary
+## Tensor / Buffer Boundary
The bufferization dialect provides a few helper ops to connect tensor IR (that
should be bufferized) with existing buffers (that may be allocated/provided by
a different runtime/library/etc.).
-`bufferization.to_memref %t` returns the future buffer of a tensor SSA value.
+`bufferization.to_buffer %t` returns the future buffer of a tensor SSA value.
`bufferization.to_tensor %m` returns a tensor SSA value for a given MemRef
buffer. `bufferization.materialize_in_destination` indicates that a tensor value
should materialize in a certain buffer.
@@ -268,7 +268,7 @@ By default, One-Shot Bufferize fails when it encounters an op with tensor
semantics (i.e., tensor result or tensor operand) that is not bufferizable
(i.e., does not implement `BufferizableOpInterface`). This can be avoided with
`allow-unknown-ops`. In that case, One-Shot Bufferize inserts
-`to_memref`/`to_tensor` ops around the bufferization boundary.
+`to_buffer`/`to_tensor` ops around the bufferization boundary.
One-Shot Bufferize can be configured to bufferize only ops from a set of
dialects with `dialect-filter`.
@@ -291,7 +291,7 @@ memref. The layout map of the memref type can be controlled with
One-Shot Bufferize bufferizes ops from top to bottom. This works well when all
ops are bufferizable. However, when encountering a non-bufferizable tensor with
-`allow-unknown-ops`, One-Shot Bufferize must insert `to_memref` ops at the
+`allow-unknown-ops`, One-Shot Bufferize must insert `to_buffer` ops at the
bufferization boundary and decide on a memref type. By default, One-Shot
Bufferize choose the most dynamic memref type wrt. layout maps. E.g.:
@@ -300,12 +300,12 @@ Bufferize choose the most dynamic memref type wrt. layout maps. E.g.:
%1 = tensor.extract %0[%idx1, %idx2] : tensor<?xf32>
```
-When bufferizing the above IR, One-Shot Bufferize inserts a `to_memref` ops with
+When bufferizing the above IR, One-Shot Bufferize inserts a `to_buffer` ops with
dynamic offset and strides:
```mlir
%0 = "my_dialect.unbufferizable_op(%t) : (tensor<?x?xf32>) -> (tensor<?x?xf32>)
-%0_m = bufferization.to_memref %0 : memref<?x?xf32, strided<[?, ?], offset: ?>>
+%0_m = bufferization.to_buffer %0 : memref<?x?xf32, strided<[?, ?], offset: ?>>
%1 = memref.load %0_m[%idx1, %idx2] : memref<?x?xf32, strided<[?, ?], offset: ?>>
```
@@ -335,7 +335,7 @@ generation of layout maps when no precise layout can be inferred:
* `identity-layout-map` uses static identity layout maps. This option can be
useful for legacy code that cannot handle memref types with layout maps.
Note that this setting can lead to additional buffer copies when folding a
- `to_tensor`/`to_memref` pair with memref types that are not cast-compatible.
+ `to_tensor`/`to_buffer` pair with memref types that are not cast-compatible.
Note: The `unknown-type-conversion` option does not affect layout maps of
function signatures. There is a separate `function-signature-type-conversion`
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
index ada9539e87121..cb6ef8bc17220 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -302,7 +302,7 @@ struct BufferizationOptions {
Value to) const;
/// Specifies whether not bufferizable ops are allowed in the input. If so,
- /// bufferization.to_memref and bufferization.to_tensor ops are inserted at
+ /// bufferization.to_buffer and bufferization.to_tensor ops are inserted at
/// the boundaries.
bool allowUnknownOps = false;
@@ -587,7 +587,7 @@ allocateTensorForShapedValue(OpBuilder &b, Location loc, Value shapedValue,
bool copy = true);
/// Lookup the buffer for the given value. If the value was not bufferized
-/// yet, wrap it in a ToMemrefOp. Otherwise, it is the result of a ToTensorOp,
+/// yet, wrap it in a ToBufferOp. Otherwise, it is the result of a ToTensorOp,
/// from which the memref operand is returned.
FailureOr<Value> getBuffer(RewriterBase &rewriter, Value value,
const BufferizationOptions &options);
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h b/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h
index 6f19dca2e8222..1ef5370802953 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h
@@ -56,10 +56,10 @@ FailureOr<Value> castOrReallocMemRefValue(OpBuilder &b, Value value,
MemRefType type,
const BufferizationOptions &options);
-/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the
-/// to_memref op are different, a memref.cast is needed.
-LogicalResult foldToMemrefToTensorPair(RewriterBase &rewriter,
- ToMemrefOp toMemref,
+/// Try to fold to_buffer(to_tensor(x)). If x's type and the result type of the
+/// to_buffer op are different, a memref.cast is needed.
+LogicalResult foldToBufferToTensorPair(RewriterBase &rewriter,
+ ToBufferOp toBuffer,
const BufferizationOptions &options);
/// Add the canonicalization patterns for bufferization.dealloc to the given
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index fad78a63444b9..7a1a701bea6dc 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -394,7 +394,7 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
An operation that creates a tensor from a `memref`. The result value is a
tensor whose shape and element type match the memref operand.
- The opposite of this op is `to_memref`. Together, these two ops are
+ The opposite of this op is `to_buffer`. Together, these two ops are
useful for source/target materializations when doing type conversions
involving tensors and memrefs.
@@ -459,7 +459,7 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
LogicalResult bufferize(RewriterBase &rewriter,
const BufferizationOptions &options) const {
- // to_tensor/to_memref pairs fold away after bufferization.
+ // to_tensor/to_buffer pairs fold away after bufferization.
return success();
}
@@ -490,10 +490,10 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
//===----------------------------------------------------------------------===//
-// ToMemrefOp
+// ToBufferOp
//===----------------------------------------------------------------------===//
-def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [
+def Bufferization_ToBufferOp : Bufferization_Op<"to_buffer", [
BufferizableOpInterface,
SameOperandsAndResultShape,
SameOperandsAndResultElementType,
@@ -507,7 +507,7 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [
```mlir
// Result type is memref<4x?xf32, #layout, 0>
- %m = bufferization.to_memref %t : tensor<4x?xf32> to memref<4x?xf32, #layout, 0>
+ %m = bufferization.to_buffer %t : tensor<4x?xf32> to memref<4x?xf32, #layout, 0>
```
This operation is a specialized variant of the built-in
@@ -527,7 +527,7 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [
// BufferizableOpInterface implementation
//===------------------------------------------------------------------===//
- // Note: ToMemrefOp / ToTensorOp are temporary ops that are inserted at the
+ // Note: ToBufferOp / ToTensorOp are temporary ops that are inserted at the
// bufferization boundary. When One-Shot bufferization is complete, there
// should be no such ops left over. If `allowUnknownOps` (or after running a
// partial bufferization pass), such ops may be part of the resulting IR,
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h
index 2f495d304b4a5..d5cb8d8eb673c 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h
@@ -50,7 +50,7 @@ LogicalResult bufferizeOp(Operation *op, const BufferizationOptions &options,
/// Bufferize the signature of `block` and its callers (i.e., ops that have the
/// given block as a successor). All block argument types are changed to memref
/// types. All corresponding operands of all callers are wrapped in
-/// bufferization.to_memref ops. All uses of bufferized tensor block arguments
+/// bufferization.to_buffer ops. All uses of bufferized tensor block arguments
/// are wrapped in bufferization.to_tensor ops.
///
/// It is expected that all callers implement the `BranchOpInterface`.
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
index ee33476f441ee..a0d113c150c5e 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
@@ -47,7 +47,7 @@ def OwnershipBasedBufferDeallocationPass
Otherwise, the pass that bufferizes the remaining tensors is responsible to
add the corresponding deallocation operations. Note that this pass does not
consider any values of tensor type and assumes that MemRef values defined by
- `bufferization.to_memref` do not return ownership and do not have to be
+ `bufferization.to_buffer` do not return ownership and do not have to be
deallocated. `bufferization.to_tensor` operations are handled similarly to
`bufferization.clone` operations with the exception that the result value is
not handled because it's a tensor (not a MemRef).
@@ -321,7 +321,7 @@ def OneShotBufferizePass : Pass<"one-shot-bufferize", "ModuleOp"> {
One-Shot Bufferize will by default reject IR that contains non-bufferizable
op, i.e., ops that do not implemement BufferizableOpInterface. Such IR can
- be allowed with `allow-unknown-ops=1`. In that case, to_memref and to_tensor
+ be allowed with `allow-unknown-ops=1`. In that case, to_buffer and to_tensor
ops will be generated at the bufferization boundary. This is useful for
compatibility with existing partial bufferization passes: These can
bufferize the remaining IR after running One-Shot Bufferize.
@@ -341,7 +341,7 @@ def OneShotBufferizePass : Pass<"one-shot-bufferize", "ModuleOp"> {
One-Shot Bufferize will by default assume memref types with fully dynamic
layout maps when a precise layout cannot be inferred. E.g., this is the case
- when wrapping a non-bufferizable op in to_memref/to_tensor ops. This
+ when wrapping a non-bufferizable op in to_buffer/to_tensor ops. This
behavior can be overridden with `unknown-type-conversion`. Valid values are
`fully-dynamic-layout-map` and `identity-layout-map`.
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 2c281c9f6aa85..a61d90a0c39b1 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -266,9 +266,9 @@ def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions",
let summary = "Extracts the `level`-th positions array of the `tensor`";
let description = [{
Returns the positions array of the tensor's storage at the given
- level. This is similar to the `bufferization.to_memref` operation
+ level. This is similar to the `bufferization.to_buffer` operation
in the sense that it provides a bridge between a tensor world view
- and a bufferized world view. Unlike the `bufferization.to_memref`
+ and a bufferized world view. Unlike the `bufferization.to_buffer`
operation, however, this sparse operation actually lowers into code
that extracts the positions array from the sparse storage itself
(either by calling a support library or through direct code).
@@ -295,9 +295,9 @@ def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates",
let summary = "Extracts the `level`-th coordinates array of the `tensor`";
let description = [{
Returns the coordinates array of the tensor's storage at the given
- level. This is similar to the `bufferization.to_memref` operation
+ level. This is similar to the `bufferization.to_buffer` operation
in the sense that it provides a bridge between a tensor world view
- and a bufferized world view. Unlike the `bufferization.to_memref`
+ and a bufferized world view. Unlike the `bufferization.to_buffer`
operation, however, this sparse operation actually lowers into code
that extracts the coordinates array from the sparse storage itself
(either by calling a support library or through direct code).
@@ -326,9 +326,9 @@ def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer",
Returns the linear coordinates array for a sparse tensor with
a trailing COO region with at least two levels. It is an error
if the tensor doesn't contain such a COO region. This is similar
- to the `bufferization.to_memref` operation in the sense that it
+ to the `bufferization.to_buffer` operation in the sense that it
provides a bridge between a tensor world view and a bufferized
- world view. Unlike the `bufferization.to_memref` operation,
+ world view. Unlike the `bufferization.to_buffer` operation,
however, this operation actually lowers into code that extracts
the linear coordinates array from the sparse storage scheme that
stores the coordinates for the COO region as an array of structures.
@@ -359,9 +359,9 @@ def SparseTensor_ToValuesOp : SparseTensor_Op<"values",
let description = [{
Returns the values array of the sparse storage format for the given
sparse tensor, independent of the actual dimension. This is similar to
- the `bufferization.to_memref` operation in the sense that it provides a bridge
+ the `bufferization.to_buffer` operation in the sense that it provides a bridge
between a tensor world view and a bufferized world view. Unlike the
- `bufferization.to_memref` operation, however, this sparse operation actually
+ `bufferization.to_buffer` operation, however, this sparse operation actually
lowers into code that extracts the values array from the sparse storage
scheme (either by calling a support library or through direct code).
diff --git a/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp b/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp
index cafbf835de22f..823d4d644f586 100644
--- a/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp
+++ b/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp
@@ -576,7 +576,7 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
auto tensorType = MemRefType::get(
dstShape, cast<ShapedType>(array.getType()).getElementType());
array =
- rewriter.create<bufferization::ToMemrefOp>(loc, tensorType, array);
+ rewriter.create<bufferization::ToBufferOp>(loc, tensorType, array);
}
auto rank = cast<ShapedType>(array.getType()).getRank();
auto opSplitAxes = adaptor.getSplitAxes().getAxes();
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index 99ffa62c41a4d..1fc34051680f1 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -624,8 +624,8 @@ bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
}
bool AnalysisState::isInPlace(OpOperand &opOperand) const {
- // ToMemrefOps are always in-place.
- if (isa<ToMemrefOp>(opOperand.getOwner()))
+ // ToBufferOps are always in-place.
+ if (isa<ToBufferOp>(opOperand.getOwner()))
return true;
// In the absence of analysis information, OpOperands that bufferize to a
@@ -650,13 +650,13 @@ bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
return false;
}
-// bufferization.to_memref is not allowed to change the rank.
-static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
+// bufferization.to_buffer is not allowed to change the rank.
+static void ensureToBufferOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
auto rankedTensorType = llvm::dyn_cast<RankedTensorType>(tensor.getType());
assert((!rankedTensorType || llvm::cast<MemRefType>(memrefType).getRank() ==
rankedTensorType.getRank()) &&
- "to_memref would be invalid: mismatching ranks");
+ "to_buffer would be invalid: mismatching ranks");
#endif
}
@@ -671,15 +671,15 @@ FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
return toTensorOp.getMemref();
- // Insert to_memref op.
+ // Insert to_buffer op.
OpBuilder::InsertionGuard g(rewriter);
setInsertionPointAfter(rewriter, value);
FailureOr<BaseMemRefType> memrefType = getBufferType(value, options);
if (failed(memrefType))
return failure();
- ensureToMemrefOpIsValid(value, *memrefType);
+ ensureToBufferOpIsValid(value, *memrefType);
return rewriter
- .create<bufferization::ToMemrefOp>(value.getLoc(), *memrefType, value)
+ .create<bufferization::ToBufferOp>(value.getLoc(), *memrefType, value)
.getResult();
}
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 4fce9be390bd6..ecd2ef15546a4 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -81,21 +81,21 @@ FailureOr<Value> mlir::bufferization::castOrReallocMemRefValue(
return copy;
}
-/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the
-/// to_memref op are different, a memref.cast is needed.
-LogicalResult mlir::bufferization::foldToMemrefToTensorPair(
- RewriterBase &rewriter, ToMemrefOp toMemref,
+/// Try to fold to_buffer(to_tensor(x)). If x's type and the result type of the
+/// to_buffer op are different, a memref.cast is needed.
+LogicalResult mlir::bufferization::foldToBufferToTensorPair(
+ RewriterBase &rewriter, ToBufferOp toBuffer,
const BufferizationOptions &options) {
- auto memrefToTensor = toMemref.getTensor().getDefiningOp<ToTensorOp>();
- if (!memrefToTensor)
+ auto bufferToTensor = toBuffer.getTensor().getDefiningOp<ToTensorOp>();
+ if (!bufferToTensor)
return failure();
- Type srcType = memrefToTensor.getMemref().getType();
- Type destType = toMemref.getType();
+ Type srcType = bufferToTensor.getMemref().getType();
+ Type destType = toBuffer.getType();
// Directly rewrite if the type did not change.
if (srcType == destType) {
- rewriter.replaceOp(toMemref, memrefToTensor.getMemref());
+ rewriter.replaceOp(toBuffer, bufferToTensor.getMemref());
return success();
}
@@ -106,11 +106,11 @@ LogicalResult mlir::bufferization::foldToMemrefToTensorPair(
// Ranked memref -> Ranked memref cast.
if (rankedSrcType && rankedDestType) {
FailureOr<Value> replacement = castOrReallocMemRefValue(
- rewriter, memrefToTensor.getMemref(), rankedDestType, options);
+ rewriter, bufferToTensor.getMemref(), rankedDestType, options);
if (failed(replacement))
return failure();
- rewriter.replaceOp(toMemref, *replacement);
+ rewriter.replaceOp(toBuffer, *replacement);
return success();
}
@@ -123,8...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/137180
More information about the Mlir-commits
mailing list