[Mlir-commits] [mlir] 9bb6337 - [mlir][bufferization] Support general Attribute as memory space
Lei Zhang
llvmlistbot at llvm.org
Mon Nov 21 06:43:59 PST 2022
Author: Lei Zhang
Date: 2022-11-21T09:40:50-05:00
New Revision: 9bb633741a508bf67189b19efef442f317be31dd
URL: https://github.com/llvm/llvm-project/commit/9bb633741a508bf67189b19efef442f317be31dd
DIFF: https://github.com/llvm/llvm-project/commit/9bb633741a508bf67189b19efef442f317be31dd.diff
LOG: [mlir][bufferization] Support general Attribute as memory space
MemRef has accepted a general Attribute as its memory space for a
long time. This commit updates the bufferization side to catch up,
which allows downstream users to plug in customized symbolic memory
spaces. It also eliminates quite a few calls to `getMemorySpaceAsInt`,
which is deprecated.
Reviewed By: springerm
Differential Revision: https://reviews.llvm.org/D138330
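As an illustration of the new plug-in point, here is a minimal sketch of
configuring bufferization options with a symbolic memory space. The
StringAttr space "foo" and the makeOptions helper are assumptions made
for the example, not part of this commit:

  #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
  #include "mlir/IR/BuiltinAttributes.h"

  using namespace mlir;
  using namespace mlir::bufferization;

  // Build options that place buffers in a downstream-defined symbolic
  // memory space instead of an integer space.
  BufferizationOptions makeOptions(MLIRContext *ctx) {
    BufferizationOptions options;
    // defaultMemorySpace is now Optional<Attribute>; any attribute works.
    options.defaultMemorySpace = StringAttr::get(ctx, "foo");
    // The unknown-type converter now receives the space as an Attribute.
    options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
                                        const BufferizationOptions &opts) {
      return getMemRefTypeWithFullyDynamicLayout(
          value.getType().cast<TensorType>(), memorySpace);
    };
    return options;
  }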
Added:
Modified:
mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
mlir/test/Dialect/Bufferization/invalid.mlir
mlir/test/Dialect/Bufferization/ops.mlir
mlir/test/Dialect/SCF/one-shot-bufferize.mlir
mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
index a5324e1345af1..5ea15f94a2c2c 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -187,7 +187,7 @@ struct BufferizationOptions {
/// Tensor -> MemRef type converter.
/// Parameters: Value, memory space, bufferization options
using UnknownTypeConverterFn = std::function<BaseMemRefType(
- Value, unsigned, const BufferizationOptions &)>;
+ Value, Attribute memorySpace, const BufferizationOptions &)>;
BufferizationOptions();
@@ -234,9 +234,9 @@ struct BufferizationOptions {
bool bufferizeFunctionBoundaries = false;
/// The default memory space that should be used when it cannot be inferred
- /// from the context. If no default memory space is specified, bufferization
- /// fails when the memory space cannot be inferred at any point.
- Optional<unsigned> defaultMemorySpace = 0;
+ /// from the context. In case of llvm::None, bufferization fails when the
+ /// memory space cannot be inferred at any point.
+ Optional<Attribute> defaultMemorySpace = Attribute();
/// Certain ops have aliasing OpOperand/OpResult invariants (e.g., scf.for).
/// If this flag is set to `false`, those invariants are no longer enforced
@@ -547,17 +547,19 @@ bool shouldDeallocateOpResult(OpResult opResult,
/// canonicalizations are currently not implemented.
BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options,
MemRefLayoutAttrInterface layout = {},
- unsigned memorySpace = 0);
+ Attribute memorySpace = nullptr);
/// Return a MemRef type with fully dynamic layout. If the given tensor type
/// is unranked, return an unranked MemRef type.
-BaseMemRefType getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
- unsigned memorySpace = 0);
+BaseMemRefType
+getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
+ Attribute memorySpace = nullptr);
/// Return a MemRef type with a static identity layout (i.e., no layout map). If
/// the given tensor type is unranked, return an unranked MemRef type.
-BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
- unsigned memorySpace = 0);
+BaseMemRefType
+getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
+ Attribute memorySpace = nullptr);
/// Return the owner of the given value. In case of a BlockArgument that is the
/// owner of the block. In case of an OpResult that is the defining op.
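For reference, a minimal sketch of calling the updated type helpers
directly; the tensor shape and the "device_local" space below are
illustrative assumptions, not values taken from this commit:

  #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
  #include "mlir/IR/BuiltinAttributes.h"
  #include "mlir/IR/BuiltinTypes.h"

  using namespace mlir;
  using namespace mlir::bufferization;

  BaseMemRefType identityLayoutBuffer(MLIRContext *ctx) {
    auto tensorType = RankedTensorType::get({4, 8}, Float32Type::get(ctx));
    // Any Attribute may now name the memory space; the nullptr default
    // means "no memory space" rather than the old integer 0.
    Attribute space = StringAttr::get(ctx, "device_local");
    return getMemRefTypeWithStaticIdentityLayout(tensorType, space);
  }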
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index b5df91f778fb7..74ed475dc74d6 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -81,7 +81,7 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
let arguments = (ins Variadic<Index>:$dynamic_sizes,
Optional<AnyTensor>:$copy,
Optional<Index>:$size_hint,
- OptionalAttr<UI64Attr>:$memory_space);
+ OptionalAttr<AnyAttr>:$memory_space);
let results = (outs AnyTensor:$result);
diff --git a/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
index 2606cb6fc3798..abcebf496fbe2 100644
--- a/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -28,7 +28,7 @@ struct ConstantOpInterface
// TODO: Implement memory space for this op. E.g., by adding a memory_space
// attribute to ConstantOp.
- if (options.defaultMemorySpace != static_cast<unsigned>(0))
+ if (options.defaultMemorySpace != Attribute())
return op->emitError("memory space not implemented yet");
// Only ranked tensors are supported.
@@ -188,7 +188,7 @@ struct SelectOpInterface
return failure();
if (*trueType == *falseType)
return *trueType;
- if (trueType->getMemorySpaceAsInt() != falseType->getMemorySpaceAsInt())
+ if (trueType->getMemorySpace() != falseType->getMemorySpace())
return op->emitError("inconsistent memory space on true/false operands");
// If the buffers have different types, they differ only in their layout
@@ -197,7 +197,7 @@ struct SelectOpInterface
return getMemRefTypeWithFullyDynamicLayout(
RankedTensorType::get(memrefType.getShape(),
memrefType.getElementType()),
- memrefType.getMemorySpaceAsInt());
+ memrefType.getMemorySpace());
}
BufferRelation bufferRelation(Operation *op, OpResult opResult,
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index 0c2e13e3c845b..f3a0394a29f05 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -114,9 +114,10 @@ FailureOr<Value> bufferization::allocateTensorForShapedValue(
FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
if (failed(copyBufferType))
return failure();
- allocTensorOp.setMemorySpaceAttr(
- b.getIntegerAttr(b.getIntegerType(64, /*isSigned=*/false),
- copyBufferType->getMemorySpaceAsInt()));
+ Attribute memorySpace = copyBufferType->getMemorySpace();
+ if (!memorySpace)
+ memorySpace = b.getI64IntegerAttr(0);
+ allocTensorOp.setMemorySpaceAttr(memorySpace);
return allocTensorOp.getResult();
}
@@ -258,7 +259,7 @@ bool OpFilter::isOpAllowed(Operation *op) const {
/// Default unknown type converter: Use a fully dynamic layout map.
static BaseMemRefType
-defaultUnknownTypeConverter(Value value, unsigned memorySpace,
+defaultUnknownTypeConverter(Value value, Attribute memorySpace,
const BufferizationOptions &options) {
return getMemRefTypeWithFullyDynamicLayout(value.getType().cast<TensorType>(),
memorySpace);
@@ -731,16 +732,14 @@ bool bufferization::isFunctionArgument(Value value) {
BaseMemRefType bufferization::getMemRefType(Value value,
const BufferizationOptions &options,
MemRefLayoutAttrInterface layout,
- unsigned memorySpace) {
+ Attribute memorySpace) {
auto tensorType = value.getType().cast<TensorType>();
- auto memorySpaceAttr = IntegerAttr::get(
- IntegerType::get(tensorType.getContext(), 64), memorySpace);
// Case 1: Unranked memref type.
if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
assert(!layout && "UnrankedTensorType cannot have a layout map");
return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
- memorySpaceAttr);
+ memorySpace);
}
// Case 2: Ranked memref type with specified layout.
@@ -748,7 +747,7 @@ BaseMemRefType bufferization::getMemRefType(Value value,
if (layout) {
return MemRefType::get(rankedTensorType.getShape(),
rankedTensorType.getElementType(), layout,
- memorySpaceAttr);
+ memorySpace);
}
return options.unknownTypeConverterFn(value, memorySpace, options);
@@ -756,7 +755,7 @@ BaseMemRefType bufferization::getMemRefType(Value value,
BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
- unsigned memorySpace) {
+ Attribute memorySpace) {
// Case 1: Unranked memref type.
if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
@@ -764,8 +763,6 @@ bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
}
// Case 2: Ranked memref type.
- auto memorySpaceAttr = IntegerAttr::get(
- IntegerType::get(tensorType.getContext(), 64), memorySpace);
auto rankedTensorType = tensorType.cast<RankedTensorType>();
int64_t dynamicOffset = ShapedType::kDynamic;
SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
@@ -774,14 +771,14 @@ bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
dynamicOffset, dynamicStrides);
return MemRefType::get(rankedTensorType.getShape(),
rankedTensorType.getElementType(), stridedLayout,
- memorySpaceAttr);
+ memorySpace);
}
/// Return a MemRef type with a static identity layout (i.e., no layout map). If
/// the given tensor type is unranked, return an unranked MemRef type.
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
- unsigned memorySpace) {
+ Attribute memorySpace) {
// Case 1: Unranked memref type.
if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
@@ -790,12 +787,10 @@ bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
// Case 2: Ranked memref type.
auto rankedTensorType = tensorType.cast<RankedTensorType>();
- auto memorySpaceAttr = IntegerAttr::get(
- IntegerType::get(tensorType.getContext(), 64), memorySpace);
MemRefLayoutAttrInterface layout = {};
return MemRefType::get(rankedTensorType.getShape(),
rankedTensorType.getElementType(), layout,
- memorySpaceAttr);
+ memorySpace);
}
bool bufferization::detail::defaultIsRepetitiveRegion(
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index e217d20ae470c..da1e69b04d4b7 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -238,7 +238,7 @@ FailureOr<BaseMemRefType> AllocTensorOp::getBufferType(
assert(value == getResult() && "invalid value");
// Compute memory space of this allocation.
- unsigned memorySpace;
+ Attribute memorySpace;
if (getMemorySpace().has_value()) {
memorySpace = *getMemorySpace();
} else if (getCopy()) {
@@ -246,7 +246,7 @@ FailureOr<BaseMemRefType> AllocTensorOp::getBufferType(
bufferization::getBufferType(getCopy(), options, fixedTypes);
if (failed(copyBufferType))
return failure();
- memorySpace = copyBufferType->getMemorySpaceAsInt();
+ memorySpace = copyBufferType->getMemorySpace();
} else if (options.defaultMemorySpace.has_value()) {
memorySpace = *options.defaultMemorySpace;
} else {
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
index 8a33bf379a245..6546d55e6b35c 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
@@ -218,7 +218,7 @@ struct OneShotBufferizePass
// Configure type converter.
LayoutMapOption unknownTypeConversionOption =
parseLayoutMapOption(unknownTypeConversion);
- opt.unknownTypeConverterFn = [=](Value value, unsigned memorySpace,
+ opt.unknownTypeConverterFn = [=](Value value, Attribute memorySpace,
const BufferizationOptions &options) {
auto tensorType = value.getType().cast<TensorType>();
if (unknownTypeConversionOption == LayoutMapOption::IdentityLayoutMap)
@@ -507,7 +507,7 @@ BufferizationOptions bufferization::getPartialBufferizationOptions() {
options.allowUnknownOps = true;
options.createDeallocs = false;
options.enforceAliasingInvariants = false;
- options.unknownTypeConverterFn = [](Value value, unsigned memorySpace,
+ options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
const BufferizationOptions &options) {
return getMemRefTypeWithStaticIdentityLayout(
value.getType().cast<TensorType>(), memorySpace);
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
index 91060dd6b1394..a441b31c49211 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
@@ -86,7 +86,7 @@ getBufferizedFunctionArgType(FuncOp funcOp, int64_t index,
assert(rankedMemrefType && "buffer layout not supported on unranked tensors");
return MemRefType::get(
rankedMemrefType.getShape(), rankedMemrefType.getElementType(),
- layoutAttr.getValue(), rankedMemrefType.getMemorySpaceAsInt());
+ layoutAttr.getValue(), rankedMemrefType.getMemorySpace());
}
/// Return the FuncOp called by `callOp`.
diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
index dcf2fe1577926..ef9350e68e531 100644
--- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -294,14 +294,12 @@ struct IfOpInterface
return thenBufferType;
// Memory space mismatch.
- if (thenBufferType.getMemorySpaceAsInt() !=
- elseBufferType.getMemorySpaceAsInt())
+ if (thenBufferType.getMemorySpace() != elseBufferType.getMemorySpace())
return op->emitError("inconsistent memory space on then/else branches");
// Layout maps are different: Promote to fully dynamic layout map.
return getMemRefTypeWithFullyDynamicLayout(
- opResult.getType().cast<TensorType>(),
- thenBufferType.getMemorySpaceAsInt());
+ opResult.getType().cast<TensorType>(), thenBufferType.getMemorySpace());
}
BufferRelation bufferRelation(Operation *op, OpResult opResult,
@@ -445,13 +443,12 @@ static FailureOr<BaseMemRefType> computeLoopRegionIterArgBufferType(
auto iterRanked = initArgBufferType->cast<MemRefType>();
assert(llvm::equal(yieldedRanked.getShape(), iterRanked.getShape()) &&
"expected same shape");
- assert(yieldedRanked.getMemorySpaceAsInt() ==
- iterRanked.getMemorySpaceAsInt() &&
+ assert(yieldedRanked.getMemorySpace() == iterRanked.getMemorySpace() &&
"expected same memory space");
#endif // NDEBUG
return getMemRefTypeWithFullyDynamicLayout(
iterArg.getType().cast<RankedTensorType>(),
- yieldedRanked.getMemorySpaceAsInt());
+ yieldedRanked.getMemorySpace());
}
/// Return `true` if the given loop may have 0 iterations.
diff --git a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
index b816fad8c2477..8c9cd7d085470 100644
--- a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
@@ -33,7 +33,7 @@ getBufferizationOptions(bool analysisOnly) {
// should be disallowed.
options.allowReturnAllocs = true;
options.functionBoundaryTypeConversion = LayoutMapOption::IdentityLayoutMap;
- options.unknownTypeConverterFn = [](Value value, unsigned memorySpace,
+ options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
const BufferizationOptions &options) {
return getMemRefTypeWithStaticIdentityLayout(
value.getType().cast<TensorType>(), memorySpace);
diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index ea66663f8a4cd..6ce05c257d94f 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -70,9 +70,8 @@ struct CastOpInterface
layout = rankedMemRefType.getLayout();
// Compute the new memref type.
- Type resultMemRefType =
- getMemRefType(castOp.getResult(), options, layout,
- sourceMemRefType.getMemorySpaceAsInt());
+ Type resultMemRefType = getMemRefType(castOp.getResult(), options, layout,
+ sourceMemRefType.getMemorySpace());
// Replace the op with a memref.cast.
assert(memref::CastOp::areCastCompatible(resultBuffer->getType(),
@@ -127,7 +126,7 @@ struct CollapseShapeOpInterface
// If dims cannot be collapsed, this op bufferizes to a new allocation.
RankedTensorType tensorResultType = collapseShapeOp.getResultType();
return bufferization::getMemRefTypeWithStaticIdentityLayout(
- tensorResultType, srcBufferType.getMemorySpaceAsInt());
+ tensorResultType, srcBufferType.getMemorySpace());
}
return memref::CollapseShapeOp::computeCollapsedType(
@@ -188,7 +187,7 @@ struct CollapseShapeOpInterface
auto memrefType =
MemRefType::get(collapseShapeOp.getSrcType().getShape(),
collapseShapeOp.getSrcType().getElementType(),
- AffineMap(), bufferType.getMemorySpaceAsInt());
+ AffineMap(), bufferType.getMemorySpace());
buffer = rewriter.create<bufferization::ToMemrefOp>(
op->getLoc(), memrefType, *tensorAlloc);
}
@@ -436,7 +435,7 @@ struct FromElementsOpInterface
fromElementsOp.getResult().cast<OpResult>(), options);
// TODO: Implement memory space for this op.
- if (options.defaultMemorySpace != static_cast<unsigned>(0))
+ if (options.defaultMemorySpace != Attribute())
return op->emitError("memory space not implemented yet");
// Allocate a buffer for the result.
@@ -556,7 +555,7 @@ struct GenerateOpInterface
generateOp.getResult().cast<OpResult>(), options);
// TODO: Implement memory space for this op.
- if (options.defaultMemorySpace != static_cast<unsigned>(0))
+ if (options.defaultMemorySpace != Attribute())
return op->emitError("memory space not implemented yet");
// Allocate memory.
@@ -951,7 +950,7 @@ struct ReshapeOpInterface
return failure();
auto resultMemRefType = getMemRefType(
reshapeOp.getResult(), options, /*layout=*/{},
- srcBuffer->getType().cast<BaseMemRefType>().getMemorySpaceAsInt());
+ srcBuffer->getType().cast<BaseMemRefType>().getMemorySpace());
replaceOpWithNewBufferizedOp<memref::ReshapeOp>(
rewriter, op, resultMemRefType, *srcBuffer, *shapeBuffer);
return success();
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
index 5935b8441d537..4c5c5919a79d0 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
@@ -171,7 +171,7 @@ func.func @alloc_tensor_with_copy(%t: tensor<5xf32>) -> tensor<5xf32> {
// CHECK-LABEL: func @alloc_tensor_with_memory_space()
func.func @alloc_tensor_with_memory_space() -> tensor<5xf32> {
// CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1>
- %0 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
+ %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<5xf32>
// CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
// CHECK: memref.dealloc %[[alloc]]
// CHECK: return %[[r]]
diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
index b8646edc79e72..2d3a1f1bc006b 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
@@ -40,10 +40,10 @@ func.func @do_not_copy_undefined_tensor(%f: f32, %idx: index)
{
// CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
// The second alloc_tensor should not have a copy operand.
- // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<5xf32>
+ // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : i64} : tensor<5xf32>
// CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
- // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true], memory_space = 0 : ui64} : tensor<5xf32>
+ // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true], memory_space = 0 : i64} : tensor<5xf32>
%0 = bufferization.alloc_tensor() : tensor<5xf32>
%1 = tensor.insert %f into %0[%idx] : tensor<5xf32>
return %0, %1 : tensor<5xf32>, tensor<5xf32>
@@ -55,7 +55,7 @@ func.func @do_not_copy_undefined_tensor(%f: f32, %idx: index)
func.func @do_not_copy_when_overwritten(%t: tensor<5xf32>, %f: f32)
-> (tensor<5xf32>, tensor<5xf32>)
{
- // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<5xf32>
+ // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : i64} : tensor<5xf32>
// CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<5xf32>)
%r = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>],
@@ -74,7 +74,7 @@ func.func @do_not_copy_when_result_not_read(%t: tensor<5xf32>, %f: f32)
-> (tensor<3xf32>)
{
%0 = tensor.extract_slice %t[0][3][1] : tensor<5xf32> to tensor<3xf32>
- // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<3xf32>
+ // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : i64} : tensor<3xf32>
// CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<3xf32>)
%r = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>],
diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir
index f78de27951d4a..a7dc5e07310fd 100644
--- a/mlir/test/Dialect/Bufferization/invalid.mlir
+++ b/mlir/test/Dialect/Bufferization/invalid.mlir
@@ -78,12 +78,3 @@ func.func @sparse_alloc_call() {
call @foo(%0) : (tensor<20x40xf32, #DCSR>) -> ()
return
}
-
-// -----
-
-func.func @alloc_tensor_invalid_memory_space_attr(%sz: index) {
- // expected-error @+1{{'bufferization.alloc_tensor' op attribute 'memory_space' failed to satisfy constraint: 64-bit unsigned integer attribute}}
- %0 = bufferization.alloc_tensor(%sz) {memory_space = "foo"} : tensor<?xf32>
- return
-}
-
diff --git a/mlir/test/Dialect/Bufferization/ops.mlir b/mlir/test/Dialect/Bufferization/ops.mlir
index 5b707ba4d7c79..4cb25a6b4e198 100644
--- a/mlir/test/Dialect/Bufferization/ops.mlir
+++ b/mlir/test/Dialect/Bufferization/ops.mlir
@@ -46,6 +46,8 @@ func.func @test_alloc_tensor_op(%t: tensor<?x5xf32>, %sz: index)
%c100 = arith.constant 100 : index
// CHECK: bufferization.alloc_tensor() size_hint=
%6 = bufferization.alloc_tensor() size_hint=%c100 : tensor<100x100xf64, #CSR>
+ // CHECK: bufferization.alloc_tensor(%{{.+}}) {memory_space = "foo"} : tensor<?xf32>
+ %7 = bufferization.alloc_tensor(%sz) {memory_space = "foo"} : tensor<?xf32>
return %1 : tensor<?x5xf32>
}
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
index 3640f82a25a96..77a7345876b34 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
@@ -717,7 +717,7 @@ func.func @scf_if_memory_space(%c: i1, %f: f32) -> (f32, f32)
{
%c0 = arith.constant 0 : index
// CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1>
- %0 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
+ %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<5xf32>
// CHECK: scf.if %{{.*}} -> (memref<5xf32, 1>) {
%1 = scf.if %c -> tensor<5xf32> {
// CHECK: %[[cloned:.*]] = bufferization.clone %[[alloc]]
@@ -747,7 +747,7 @@ func.func @scf_if_memory_space(%c: i1, %f: f32) -> (f32, f32)
func.func @scf_execute_region_memory_space(%f: f32) -> f32 {
%c0 = arith.constant 0 : index
%0 = scf.execute_region -> tensor<5xf32> {
- %1 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
+ %1 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<5xf32>
%2 = tensor.insert %f into %1[%c0] : tensor<5xf32>
scf.yield %2 : tensor<5xf32>
}
@@ -767,8 +767,8 @@ func.func @scf_for_swapping_yields_memory_space(
{
// CHECK: memref.alloc(%{{.*}}) {{.*}} : memref<?xf32, 1>
// CHECK: memref.alloc(%{{.*}}) {{.*}} : memref<?xf32, 1>
- %A = bufferization.alloc_tensor(%sz) {memory_space = 1 : ui64} : tensor<?xf32>
- %B = bufferization.alloc_tensor(%sz) {memory_space = 1 : ui64} : tensor<?xf32>
+ %A = bufferization.alloc_tensor(%sz) {memory_space = 1 : i64} : tensor<?xf32>
+ %B = bufferization.alloc_tensor(%sz) {memory_space = 1 : i64} : tensor<?xf32>
// CHECK: scf.for {{.*}} {
%r0:2 = scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B)
diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
index b55f8cbe33ae3..de3104540af85 100755
--- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
@@ -22,7 +22,7 @@
// CHECK-LABEL: func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
// CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<1024x1024xf64>
-// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<1024x1024xf64>
+// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<1024x1024xf64>
// CHECK: return %[[VAL_1]] : tensor<1024x1024xf64>
// CHECK: }
func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
@@ -41,7 +41,7 @@ func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
// CHECK-LABEL: func.func @fold_yield_direct_zero() -> tensor<32xf64> {
// CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<32xf64>
-// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<32xf64>
+// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<32xf64>
// CHECK: return %[[VAL_1]] : tensor<32xf64>
// CHECK: }
func.func @fold_yield_direct_zero() -> tensor<32xf64> {
@@ -65,7 +65,7 @@ func.func @fold_yield_direct_zero() -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false]} : tensor<8x8xf64>
-// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<8x8xf64>
+// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<8x8xf64>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
index ab80f0e08ecb1..a1dffec50d763 100644
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
@@ -261,7 +261,7 @@ func.func @pad_memory_space(%t: tensor<?xf32>, %h1: index, %f: f32, %pos: index)
// CHECK: %[[alloc_tensor:.*]] = memref.alloc{{.*}} : memref<?xf32, 3>
// CHECK: memref.copy %[[t]], %[[alloc_tensor]]
%0 = bufferization.alloc_tensor() copy(%t)
- {memory_space = 3 : ui64} : tensor<?xf32>
+ {memory_space = 3 : i64} : tensor<?xf32>
// CHECK: %[[padded_alloc:.*]] = memref.alloc() {{.*}} : memref<15xf32, 3>
// CHECK: linalg.map
// CHECK: outs(%[[padded_alloc]] : memref<15xf32, 3>)