[Mlir-commits] [mlir] 6f1e23b - [MLIR][Bufferization] Choose default memory space in tensor copy insertion (#88500)
llvmlistbot at llvm.org
Fri Apr 12 08:56:50 PDT 2024
Author: Kunwar Grover
Date: 2024-04-12T17:56:46+02:00
New Revision: 6f1e23b47d428d792866993ed26f4173d479d43d
URL: https://github.com/llvm/llvm-project/commit/6f1e23b47d428d792866993ed26f4173d479d43d
DIFF: https://github.com/llvm/llvm-project/commit/6f1e23b47d428d792866993ed26f4173d479d43d.diff
LOG: [MLIR][Bufferization] Choose default memory space in tensor copy insertion (#88500)
Tensor copy insertion currently uses memory_space = 0 when creating a
tensor copy using alloc_tensor. This memory space should instead be the
default memory space provided in bufferization options.
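For readers configuring the bufferization pipeline, a minimal sketch of how the new fallback can be driven from the client side: the defaultMemorySpaceFn hook on BufferizationOptions (the same hook consulted in the change below) is overridden to return the desired memory space. The helper name makeOptionsWithMemorySpace and the concrete i64 space value are illustrative assumptions, not part of this patch.

    #include <optional>

    #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
    #include "mlir/IR/Builders.h"

    using namespace mlir;
    using namespace mlir::bufferization;

    // Hypothetical helper: build BufferizationOptions whose default memory
    // space is `space`. With this patch, allocateTensorForShapedValue falls
    // back to defaultMemorySpaceFn instead of hard-coding memory_space = 0
    // when the copied buffer type carries no memory space of its own.
    static BufferizationOptions makeOptionsWithMemorySpace(MLIRContext *ctx,
                                                           int64_t space) {
      BufferizationOptions options;
      options.defaultMemorySpaceFn =
          [ctx, space](TensorType /*tensorType*/) -> std::optional<Attribute> {
        return Builder(ctx).getI64IntegerAttr(space);
      };
      return options;
    }

If the hook yields no attribute, the patched code simply omits the memory_space attribute on the alloc_tensor it creates, and the CHECK lines in the tests below no longer expect a hard-coded memory_space = 0 : i64 on the inserted copies.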
Added:
Modified:
mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index 55c9299c58effd..c2b2b99fc0083b 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -193,10 +193,11 @@ FailureOr<Value> bufferization::allocateTensorForShapedValue(
FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
if (failed(copyBufferType))
return failure();
- Attribute memorySpace = copyBufferType->getMemorySpace();
+ std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
if (!memorySpace)
- memorySpace = b.getI64IntegerAttr(0);
- allocTensorOp.setMemorySpaceAttr(memorySpace);
+ memorySpace = options.defaultMemorySpaceFn(tensorType);
+ if (memorySpace.has_value())
+ allocTensorOp.setMemorySpaceAttr(memorySpace.value());
return allocTensorOp.getResult();
}
diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
index 72cf08df5978cf..55dcca193cbe49 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
@@ -34,7 +34,7 @@ func.func @do_not_copy_undefined_tensor(%f: f32, %idx: index)
{
// The second alloc_tensor should not have a copy operand.
// CHECK: bufferization.alloc_tensor() : tensor<5xf32>
- // CHECK: bufferization.alloc_tensor() {memory_space = 0 : i64} : tensor<5xf32>
+ // CHECK: bufferization.alloc_tensor() : tensor<5xf32>
%0 = bufferization.alloc_tensor() : tensor<5xf32>
%1 = tensor.insert %f into %0[%idx] : tensor<5xf32>
return %0, %1 : tensor<5xf32>, tensor<5xf32>
@@ -46,7 +46,7 @@ func.func @do_not_copy_undefined_tensor(%f: f32, %idx: index)
func.func @do_not_copy_when_overwritten(%t: tensor<5xf32>, %f: f32)
-> (tensor<5xf32>, tensor<5xf32>)
{
- // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {memory_space = 0 : i64} : tensor<5xf32>
+ // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() : tensor<5xf32>
// CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<5xf32>)
%r = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>],
@@ -65,7 +65,7 @@ func.func @do_not_copy_when_result_not_read(%t: tensor<5xf32>, %f: f32)
-> (tensor<3xf32>)
{
%0 = tensor.extract_slice %t[0][3][1] : tensor<5xf32> to tensor<3xf32>
- // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {memory_space = 0 : i64} : tensor<3xf32>
+ // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() : tensor<3xf32>
// CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<3xf32>)
%r = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>],
diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
index 6fc2db481c6fd4..fcd69bea426d67 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
@@ -63,7 +63,7 @@ func.func @fold_yield_direct_zero() -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64>
-// CHECK-DAG: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {memory_space = 0 : i64} : tensor<8x8xf64>
+// CHECK-DAG: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>