[Mlir-commits] [mlir] bfde178 - [mlir] Update the return type of `getNum{Dynamic|Scalable}Dims` (#110472)
llvmlistbot at llvm.org
llvmlistbot at llvm.org
Mon Sep 30 06:53:54 PDT 2024
Author: Andrzej Warzyński
Date: 2024-09-30T14:53:50+01:00
New Revision: bfde17834dd9bd30da8f56166cd545f566f64895
URL: https://github.com/llvm/llvm-project/commit/bfde17834dd9bd30da8f56166cd545f566f64895
DIFF: https://github.com/llvm/llvm-project/commit/bfde17834dd9bd30da8f56166cd545f566f64895.diff
LOG: [mlir] Update the return type of `getNum{Dynamic|Scalable}Dims` (#110472)
Updates the return type of `getNumDynamicDims` and `getNumScalableDims`
from `int64_t` to `size_t`. This is for consistency with other
helpers/methods that return "size" and to reduce the number of
`static_cast`s in various places.
Added:
Modified:
mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
mlir/include/mlir/IR/BuiltinTypeInterfaces.td
mlir/include/mlir/IR/BuiltinTypes.td
mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
index a154d7fa5fb6e5..620fd7c63146dd 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
@@ -293,7 +293,7 @@ class SparseTensorType {
/// Returns the number of dimensions which have dynamic sizes.
/// The return type is `int64_t` to maintain consistency with
/// `ShapedType::Trait<T>::getNumDynamicDims`.
- int64_t getNumDynamicDims() const { return rtp.getNumDynamicDims(); }
+ size_t getNumDynamicDims() const { return rtp.getNumDynamicDims(); }
ArrayRef<LevelType> getLvlTypes() const { return enc.getLvlTypes(); }
LevelType getLvlType(Level l) const {
diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
index db38e2e1bce22a..c9dcd546cf67c2 100644
--- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
+++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
@@ -166,7 +166,7 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> {
/// If this is a ranked type, return the number of dimensions with dynamic
/// size. Otherwise, abort.
- int64_t getNumDynamicDims() const {
+ size_t getNumDynamicDims() const {
return llvm::count_if($_type.getShape(), ::mlir::ShapedType::isDynamic);
}
diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td
index c738a8a3becc16..b2b41b16beec29 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.td
+++ b/mlir/include/mlir/IR/BuiltinTypes.td
@@ -1253,7 +1253,7 @@ def Builtin_Vector : Builtin_Type<"Vector", "vector",
}
/// Get the number of scalable dimensions.
- int64_t getNumScalableDims() const {
+ size_t getNumScalableDims() const {
return llvm::count(getScalableDims(), true);
}
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 04a8ff30ee946b..f1841b860ff81a 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -249,8 +249,7 @@ AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options,
LogicalResult AllocTensorOp::verify() {
if (getCopy() && !getDynamicSizes().empty())
return emitError("dynamic sizes not needed when copying a tensor");
- if (!getCopy() && getType().getNumDynamicDims() !=
- static_cast<int64_t>(getDynamicSizes().size()))
+ if (!getCopy() && getType().getNumDynamicDims() != getDynamicSizes().size())
return emitError("expected ")
<< getType().getNumDynamicDims() << " dynamic sizes";
if (getCopy() && getCopy().getType() != getType())
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index f822c11aeec008..956877497d9338 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -2045,8 +2045,7 @@ void WaitOp::getCanonicalizationPatterns(RewritePatternSet &results,
LogicalResult AllocOp::verify() {
auto memRefType = llvm::cast<MemRefType>(getMemref().getType());
- if (static_cast<int64_t>(getDynamicSizes().size()) !=
- memRefType.getNumDynamicDims())
+ if (getDynamicSizes().size() != memRefType.getNumDynamicDims())
return emitOpError("dimension operand count does not equal memref "
"dynamic dimension count");
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 75b9729e63648c..d579a27359dfa0 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -205,8 +205,7 @@ static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
if (!memRefType)
return op.emitOpError("result must be a memref");
- if (static_cast<int64_t>(op.getDynamicSizes().size()) !=
- memRefType.getNumDynamicDims())
+ if (op.getDynamicSizes().size() != memRefType.getNumDynamicDims())
return op.emitOpError("dimension operand count does not equal memref "
"dynamic dimension count");
@@ -283,8 +282,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
// Create new memref type (which will have fewer dynamic dimensions).
MemRefType newMemRefType =
MemRefType::Builder(memrefType).setShape(newShapeConstants);
- assert(static_cast<int64_t>(dynamicSizes.size()) ==
- newMemRefType.getNumDynamicDims());
+ assert(dynamicSizes.size() == newMemRefType.getNumDynamicDims());
// Create and insert the alloc op for the new memref.
auto newAlloc = rewriter.create<AllocLikeOp>(
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 1ac96756e22b5e..defac8308b9092 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -179,8 +179,7 @@ static RankedTensorType
foldDynamicToStaticDimSizes(RankedTensorType type, ValueRange dynamicSizes,
SmallVector<Value> &foldedDynamicSizes) {
SmallVector<int64_t> staticShape(type.getShape());
- assert(type.getNumDynamicDims() ==
- static_cast<int64_t>(dynamicSizes.size()) &&
+ assert(type.getNumDynamicDims() == dynamicSizes.size() &&
"incorrect number of dynamic sizes");
// Compute new static and dynamic sizes.
@@ -894,8 +893,7 @@ void EmptyOp::build(OpBuilder &builder, OperationState &result,
}
LogicalResult EmptyOp::verify() {
- if (getType().getNumDynamicDims() !=
- static_cast<int64_t>(getDynamicSizes().size()))
+ if (getType().getNumDynamicDims() != getDynamicSizes().size())
return emitOpError("incorrect number of dynamic sizes, has ")
<< getDynamicSizes().size() << ", expected "
<< getType().getNumDynamicDims();
@@ -3672,8 +3670,7 @@ void SplatOp::getAsmResultNames(
}
LogicalResult SplatOp::verify() {
- if (getType().getNumDynamicDims() !=
- static_cast<int64_t>(getDynamicSizes().size()))
+ if (getType().getNumDynamicDims() != getDynamicSizes().size())
return emitOpError("incorrect number of dynamic sizes, has ")
<< getDynamicSizes().size() << ", expected "
<< getType().getNumDynamicDims();
More information about the Mlir-commits
mailing list