[flang-commits] [flang] fb4cedc - [mlir][nfc] Clean-up usage of kDynamicSize.

Aliia Khasanova via flang-commits <flang-commits at lists.llvm.org>
Thu Oct 20 06:55:42 PDT 2022


Author: Aliia Khasanova
Date: 2022-10-20T13:54:57Z
New Revision: fb4cedcc1e0f8ec1071d23fd3910806398c6d6b0

URL: https://github.com/llvm/llvm-project/commit/fb4cedcc1e0f8ec1071d23fd3910806398c6d6b0
DIFF: https://github.com/llvm/llvm-project/commit/fb4cedcc1e0f8ec1071d23fd3910806398c6d6b0.diff

LOG: [mlir][nfc] Clean-up usage of kDynamicSize.

This patch prepares the MLIR code base for changing the value of kDynamicSize.
https://discourse.llvm.org/t/rfc-unify-kdynamicsize-and-kdynamicstrideoroffset/64534/4

Differential Revision: https://reviews.llvm.org/D136327
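
For context (illustrative only, not part of the commit): the clean-up amounts to spelling dynamic extents with the named ShapedType helpers instead of a literal -1, so that the sentinel's concrete value can be changed later. A minimal sketch of the before/after idiom, assuming `b` is an mlir::Builder (not something from this patch):

    // Before: a dynamic dimension written as a magic number.
    auto oldType = mlir::MemRefType::get({-1, 4}, b.getF32Type());

    // After: the same type built from the named constant, and a query that
    // stays correct even if kDynamicSize's concrete value changes.
    auto newType = mlir::MemRefType::get(
        {mlir::ShapedType::kDynamicSize, 4}, b.getF32Type());
    bool dim0IsDynamic = mlir::ShapedType::isDynamic(newType.getDimSize(0));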

Added: 
    

Modified: 
    flang/include/flang/Optimizer/Dialect/FIRTypes.td
    flang/lib/Optimizer/Transforms/AffinePromotion.cpp
    mlir/lib/AsmParser/TypeParser.cpp
    mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
    mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
    mlir/lib/Dialect/Affine/Utils/Utils.cpp
    mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
    mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
    mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
    mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
    mlir/lib/Dialect/Traits.cpp
    mlir/lib/IR/BuiltinTypes.cpp
    mlir/python/mlir/dialects/_tensor_ops_ext.py
    mlir/test/python/dialects/linalg/ops.py
    mlir/test/python/dialects/shape.py
    mlir/test/python/dialects/tensor.py
    mlir/test/python/dialects/vector.py
    mlir/unittests/Dialect/BroadcastShapeTest.cpp

Removed: 
    


################################################################################
diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
index f179071f1943f..0d06e1d118ea7 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRTypes.td
+++ b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
@@ -128,7 +128,7 @@ def fir_CharacterType : FIR_Type<"Character", "char"> {
     static constexpr LenType singleton() { return 1; }
 
     /// Character has a LEN value which is not a compile-time known constant.
-    static constexpr LenType unknownLen() { return -1; }
+    static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamicSize; }
 
     /// Character LEN is a runtime value.
     bool hasDynamicLen() { return getLen() == unknownLen(); }

diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
index ae152cf3a524b..eaf0cd1db93a2 100644
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -410,7 +410,8 @@ createAffineOps(mlir::Value arrayRef, mlir::PatternRewriter &rewriter) {
   auto affineApply = rewriter.create<mlir::AffineApplyOp>(acoOp.getLoc(),
                                                           affineMap, indexArgs);
   auto arrayElementType = coordinateArrayElement(acoOp);
-  auto newType = mlir::MemRefType::get({-1}, arrayElementType);
+  auto newType =
+      mlir::MemRefType::get({mlir::ShapedType::kDynamicSize}, arrayElementType);
   auto arrayConvert = rewriter.create<fir::ConvertOp>(acoOp.getLoc(), newType,
                                                       acoOp.getMemref());
   return std::make_pair(affineApply, arrayConvert);

diff --git a/mlir/lib/AsmParser/TypeParser.cpp b/mlir/lib/AsmParser/TypeParser.cpp
index 5ab7a89eac01e..fa428b2f06fab 100644
--- a/mlir/lib/AsmParser/TypeParser.cpp
+++ b/mlir/lib/AsmParser/TypeParser.cpp
@@ -510,7 +510,7 @@ Parser::parseDimensionListRanked(SmallVectorImpl<int64_t> &dimensions,
     if (consumeIf(Token::question)) {
       if (!allowDynamic)
         return emitError(loc, "expected static shape");
-      dimensions.push_back(-1);
+      dimensions.push_back(ShapedType::kDynamicSize);
     } else {
       int64_t value;
       if (failed(parseIntegerInDimensionList(value)))

diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index e176e8866790b..381c0a15da283 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -843,7 +843,7 @@ static bool findIntermediateShape(ArrayRef<int64_t> lhsShape,
                                   bool isDynamic) {
   if (isDynamic) {
     // TODO (natashaknk): Make dynamic intermediate shape not always be rank-1
-    intermediateShape = {-1};
+    intermediateShape = {ShapedType::kDynamicSize};
     return true;
   }
 
@@ -1778,7 +1778,8 @@ struct TileConverter : public OpConversionPattern<tosa::TileOp> {
     // Broadcast the newly added dimensions to their appropriate multiple.
     SmallVector<int64_t, 2> genericShape;
     for (int i = 0; i < rank; i++) {
-      genericShape.push_back(multiples[i]);
+      int64_t dim = multiples[i];
+      genericShape.push_back(dim == -1 ? ShapedType::kDynamicSize : dim);
       genericShape.push_back(inputShape[i]);
     }
 

diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 9f2714d1c6173..41c192868e917 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -30,16 +30,16 @@ class SliceOpConverter : public OpRewritePattern<tosa::SliceOp> {
                                 PatternRewriter &rewriter) const final {
     Location loc = sliceOp.getLoc();
     Value input = sliceOp.getInput();
-    SmallVector<int64_t> strides;
+    SmallVector<int64_t> strides, sizes;
     auto starts = sliceOp.getStart();
-    auto sizes = sliceOp.getSize();
     strides.resize(sliceOp.getType().template cast<ShapedType>().getRank(), 1);
 
     SmallVector<Value> dynSizes;
-    for (const auto &i : llvm::enumerate(sizes)) {
+    for (const auto &i : llvm::enumerate(sliceOp.getSize())) {
       int64_t size = i.value().cast<IntegerAttr>().getInt();
       size_t index = i.index();
-      if (size != ShapedType::kDynamicSize)
+      sizes.push_back(size == -1 ? ShapedType::kDynamicSize : size);
+      if (!ShapedType::isDynamic(sizes.back()))
         continue;
 
       auto dim = rewriter.create<tensor::DimOp>(loc, input, index);
@@ -51,7 +51,8 @@ class SliceOpConverter : public OpRewritePattern<tosa::SliceOp> {
 
     auto newSliceOp = rewriter.create<tensor::ExtractSliceOp>(
         sliceOp.getLoc(), sliceOp.getType(), input, ValueRange({}), dynSizes,
-        ValueRange({}), starts, sizes, rewriter.getI64ArrayAttr(strides));
+        ValueRange({}), starts, rewriter.getI64ArrayAttr(sizes),
+        rewriter.getI64ArrayAttr(strides));
 
     rewriter.replaceOp(sliceOp, newSliceOp.getResult());
     return success();

diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index 53ab1130fddaa..9f074a488b4af 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -1796,7 +1796,7 @@ MemRefType mlir::normalizeMemRefType(MemRefType memrefType,
     bool isDynDim =
         isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims, context);
     if (isDynDim) {
-      newShape[d] = -1;
+      newShape[d] = ShapedType::kDynamicSize;
     } else {
       // The lower bound for the shape is always zero.
       Optional<int64_t> ubConst =

diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
index 1702cd636d237..6a94f50e22202 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
@@ -266,11 +266,12 @@ static Type parseVectorType(AsmParser &parser) {
 
   // We parsed a generic dimension list, but vectors only support two forms:
   //  - single non-dynamic entry in the list (fixed vector);
-  //  - two elements, the first dynamic (indicated by -1) and the second
+  //  - two elements, the first dynamic (indicated by ShapedType::kDynamicSize)
+  //  and the second
   //    non-dynamic (scalable vector).
   if (dims.empty() || dims.size() > 2 ||
-      ((dims.size() == 2) ^ (dims[0] == -1)) ||
-      (dims.size() == 2 && dims[1] == -1)) {
+      ((dims.size() == 2) ^ (ShapedType::isDynamic(dims[0]))) ||
+      (dims.size() == 2 && ShapedType::isDynamic(dims[1]))) {
     parser.emitError(dimPos)
         << "expected '? x <integer> x <type>' or '<integer> x <type>'";
     return Type();

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 0995b01092dda..6f642eafda131 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -64,7 +64,8 @@ static Value allocBuffer(ImplicitLocOpBuilder &b,
   }
 
   // Fallback dynamic buffer.
-  auto dynamicBufferType = MemRefType::get(-1, b.getIntegerType(8));
+  auto dynamicBufferType =
+      MemRefType::get(ShapedType::kDynamicSize, b.getIntegerType(8));
   Value mul = b.createOrFold<arith::MulIOp>(
       b.create<arith::ConstantIndexOp>(width), allocSize);
   if (options.useAlloca)
@@ -242,7 +243,7 @@ FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
     partialSizes.push_back(
         b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
   }
-  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), -1);
+  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamicSize);
   // If a callback is not specified, then use the default implementation for
   // allocating the promoted buffer.
   Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);

diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 9a6727dff9335..fb317374b4246 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -185,7 +185,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
     for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
       int64_t dimSize = memrefType.getDimSize(dim);
       // If this is already static dimension, keep it.
-      if (dimSize != -1) {
+      if (!ShapedType::isDynamic(dimSize)) {
         newShapeConstants.push_back(dimSize);
         continue;
       }
@@ -197,7 +197,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
         newShapeConstants.push_back(constantIndexOp.value());
       } else {
         // Dynamic shape dimension not folded; copy dynamicSize from old memref.
-        newShapeConstants.push_back(-1);
+        newShapeConstants.push_back(ShapedType::kDynamicSize);
         dynamicSizes.push_back(dynamicSize);
       }
       dynamicDimPos++;
@@ -666,7 +666,8 @@ bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
 
     for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
       int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
-      if (aDim != -1 && bDim != -1 && aDim != bDim)
+      if (!ShapedType::isDynamic(aDim) && !ShapedType::isDynamic(bDim) &&
+          aDim != bDim)
         return false;
     }
     return true;

diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index e9168f7a40517..95a7a47827e40 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -506,7 +506,7 @@ LogicalResult ConcatenateOp::verify() {
               "sum of all the concatenation dimensions of the input tensors.");
       }
     } else {
-      int prev = dstDim;
+      int64_t prev = dstDim;
       for (auto src : getInputs()) {
         auto d = src.getType().cast<RankedTensorType>().getShape()[i];
         if (prev != ShapedType::kDynamicSize && d != prev)

diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 841a27479bada..f9469962e2131 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -434,7 +434,7 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
   }
 
   // Determine the dimension size along the concatenation axis.
-  int concatDimSize = 0;
+  int64_t concatDimSize = 0;
   for (auto operand : operands) {
     ShapeAdaptor operandShape = operands.getShape(operand);
 
@@ -645,7 +645,7 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
   // Any non dynamic dimension can be multiplied to a known size.
   outputShape.reserve(multiples.size());
   for (int i = 0, s = inputShape.getRank(); i < s; i++) {
-    int dim = inputShape.getDimSize(i);
+    int64_t dim = inputShape.getDimSize(i);
     if (dim != ShapedType::kDynamicSize)
       dim *= multipleValues[i];
     outputShape.push_back(dim);
@@ -655,6 +655,12 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
   return success();
 }
 
+static SmallVector<int64_t> ConvertToMlirShape(ArrayRef<int64_t> shape) {
+  return to_vector(llvm::map_range(shape, [](int64_t dim) {
+    return dim == -1 ? ShapedType::kDynamicSize : dim;
+  }));
+}
+
 LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
     MLIRContext *context, ::llvm::Optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
@@ -665,6 +671,7 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
   ArrayAttr newShape = adaptor.getNewShape();
   llvm::SmallVector<int64_t> newShapeValue;
   getI64Values(newShape, newShapeValue);
+  newShapeValue = ConvertToMlirShape(newShapeValue);
 
   // We cannot infer from the total number of elements so we must take the
   // shape attribute as exact.
@@ -679,14 +686,14 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
   int64_t numElements = inputShape.getNumElements();
   int64_t staticMul = 1;
   for (auto val : newShapeValue) {
-    if (val != ShapedType::kDynamicSize) {
+    if (!ShapedType::isDynamic(val)) {
       staticMul *= val;
     }
   }
 
   // Determine the length of the dynamic dimension.
   for (auto &val : newShapeValue) {
-    if (val == ShapedType::kDynamicSize)
+    if (ShapedType::isDynamic(val))
       val = numElements / staticMul;
   }
 
@@ -800,8 +807,8 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
 
   outputShape[0] = inputShape.getDimSize(0);
   outputShape[3] = inputShape.getDimSize(3);
-  int32_t inputHeight = inputShape.getDimSize(1);
-  int32_t inputWidth = inputShape.getDimSize(2);
+  int64_t inputHeight = inputShape.getDimSize(1);
+  int64_t inputWidth = inputShape.getDimSize(2);
 
   if ((inputHeight == ShapedType::kDynamicSize) ||
       (inputWidth == ShapedType::kDynamicSize))
@@ -961,7 +968,7 @@ static LogicalResult poolingInferReturnTypes(
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
   llvm::SmallVector<int64_t> outputShape;
-  outputShape.resize(4, -1);
+  outputShape.resize(4, ShapedType::kDynamicSize);
 
   // We only know the rank if the input type is unranked.
   if (!inputShape) {
@@ -973,8 +980,8 @@ static LogicalResult poolingInferReturnTypes(
   outputShape[0] = inputShape.getDimSize(0);
   outputShape[3] = inputShape.getDimSize(3);
 
-  int32_t height = inputShape.getDimSize(1);
-  int32_t width = inputShape.getDimSize(2);
+  int64_t height = inputShape.getDimSize(1);
+  int64_t width = inputShape.getDimSize(2);
 
   llvm::SmallVector<int64_t> kernel;
   llvm::SmallVector<int64_t> stride;
@@ -984,13 +991,13 @@ static LogicalResult poolingInferReturnTypes(
   getI64Values(attributes.get("stride").cast<ArrayAttr>(), stride);
   getI64Values(attributes.get("pad").cast<ArrayAttr>(), pad);
 
-  if (height != -1) {
-    int32_t padded = height + pad[0] + pad[1] - kernel[0];
+  if (!ShapedType::isDynamic(height)) {
+    int64_t padded = height + pad[0] + pad[1] - kernel[0];
     outputShape[1] = padded / stride[0] + 1;
   }
 
-  if (width != -1) {
-    int32_t padded = width + pad[2] + pad[3] - kernel[1];
+  if (!ShapedType::isDynamic(width)) {
+    int64_t padded = width + pad[2] + pad[3] - kernel[1];
     outputShape[2] = padded / stride[1] + 1;
   }
 
@@ -1005,10 +1012,10 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(
   llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
   Conv2DOp::Adaptor adaptor(operands.getValues(), attributes);
 
-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
 
   // Input shape describes input width/height and batch.
 
@@ -1045,17 +1052,17 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
-    int32_t inputSize = inputHeight + padding[0] + padding[1];
-    int32_t filterSize = (weightHeight - 1) * dilation[0] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputHeight + padding[0] + padding[1];
+    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
     outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
   }
 
   if (!ShapedType::isDynamic(inputWidth) &&
       !ShapedType::isDynamic(weightWidth)) {
-    int32_t inputSize = inputWidth + padding[2] + padding[3];
-    int32_t filterSize = (weightWidth - 1) * dilation[1] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputWidth + padding[2] + padding[3];
+    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
     outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
   }
 
@@ -1072,13 +1079,13 @@ LogicalResult Conv3DOp::inferReturnTypeComponents(
   llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamicSize);
   Conv3DOp::Adaptor adaptor(operands.getValues(), attributes);
 
-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t inputDepth = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t inputDepth = ShapedType::kDynamicSize;
 
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
-  int32_t weightDepth = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t weightDepth = ShapedType::kDynamicSize;
 
   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1163,13 +1170,13 @@ LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
   llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
   DepthwiseConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
 
-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t inputChannels = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t inputChannels = ShapedType::kDynamicSize;
 
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
-  int32_t depthChannels = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t depthChannels = ShapedType::kDynamicSize;
 
   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1216,17 +1223,17 @@ LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
-    int32_t inputSize = inputHeight + padding[0] + padding[1];
-    int32_t filterSize = (weightHeight - 1) * dilation[0] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputHeight + padding[0] + padding[1];
+    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
     outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
   }
 
   if (!ShapedType::isDynamic(inputWidth) &&
       !ShapedType::isDynamic(weightWidth)) {
-    int32_t inputSize = inputWidth + padding[2] + padding[3];
-    int32_t filterSize = (weightWidth - 1) * dilation[1] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputWidth + padding[2] + padding[3];
+    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
     outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
   }
 
@@ -1243,11 +1250,12 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
   TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
   llvm::SmallVector<int64_t> outputShape;
   getI64Values(adaptor.getOutShape(), outputShape);
+  outputShape = ConvertToMlirShape(outputShape);
 
-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
 
   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1285,16 +1293,18 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
-    int32_t calculateSize =
+    int64_t calculateSize =
         (inputHeight - 1) * stride[0] - padding[0] - padding[1] + weightHeight;
-    outputShape[1] = outputShape[1] == -1 ? calculateSize : outputShape[1];
+    outputShape[1] =
+        ShapedType::isDynamic(outputShape[1]) ? calculateSize : outputShape[1];
   }
 
   if (!ShapedType::isDynamic(inputWidth) &&
       !ShapedType::isDynamic(weightWidth)) {
-    int32_t calculateSize =
+    int64_t calculateSize =
         (inputWidth - 1) * stride[1] - padding[2] - padding[3] + weightWidth;
-    outputShape[2] = outputShape[2] == -1 ? calculateSize : outputShape[2];
+    outputShape[2] =
+        ShapedType::isDynamic(outputShape[2]) ? calculateSize : outputShape[2];
   }
 
   inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
index 936e50a18f98c..790648e451e48 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
@@ -19,6 +19,12 @@ using namespace mlir::tosa;
 
 namespace {
 
+SmallVector<int64_t> ConvertFromMlirShape(ArrayRef<int64_t> shape) {
+  return to_vector(llvm::map_range(shape, [](int64_t dim) {
+    return ShapedType::isDynamic(dim) ? -1 : dim;
+  }));
+}
+
 struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
   explicit Conv2DIsFullyConnected(MLIRContext *context)
       : OpRewritePattern(context) {}
@@ -52,16 +58,17 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
 
     // Reshape input to [N,IH,IW,IC] -> [N * IH * IW, IC].
     ArrayRef<int64_t> inputShape = inputType.getShape();
-    int64_t combined = inputShape[0] * inputShape[1] * inputShape[2];
-    if (combined < 0)
-      combined = ShapedType::kDynamicSize;
+    int64_t combined = ShapedType::kDynamicSize;
+    if (numDynamic == 0)
+      combined = inputShape[0] * inputShape[1] * inputShape[2];
     llvm::SmallVector<int64_t, 2> revisedInputShape{combined, inputShape[3]};
     auto revisedInputShapeType =
         RankedTensorType::get(revisedInputShape, inputType.getElementType());
     auto reshapedInput = rewriter
                              .create<tosa::ReshapeOp>(
                                  op.getLoc(), revisedInputShapeType, input,
-                                 rewriter.getI64ArrayAttr(revisedInputShape))
+                                 rewriter.getI64ArrayAttr(
+                                     ConvertFromMlirShape(revisedInputShape)))
                              .getResult();
 
     // Reshape kernel to [OC,KH,KW,IC] -> [OC, IC].
@@ -73,7 +80,8 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
     auto reshapedWeight = rewriter
                               .create<tosa::ReshapeOp>(
                                   op.getLoc(), revisedWeightShapeType, weight,
-                                  rewriter.getI64ArrayAttr(revisedWeightShape))
+                                  rewriter.getI64ArrayAttr(
+                                      ConvertFromMlirShape(revisedWeightShape)))
                               .getResult();
 
     // Perform a fully connected network over the reshaped input and weight.
@@ -102,7 +110,7 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
                                               inputShape[2], weightShape[0]};
     rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
         op, resultType, fullyConnectedValue,
-        rewriter.getI64ArrayAttr(outputShape));
+        rewriter.getI64ArrayAttr(ConvertFromMlirShape(outputShape)));
     return success();
   }
 };

diff --git a/mlir/lib/Dialect/Traits.cpp b/mlir/lib/Dialect/Traits.cpp
index ce2feff441bc9..1b3b373fef3fd 100644
--- a/mlir/lib/Dialect/Traits.cpp
+++ b/mlir/lib/Dialect/Traits.cpp
@@ -80,7 +80,7 @@ bool OpTrait::util::getBroadcastedShape(ArrayRef<int64_t> shape1,
 
   // Check each dimension is consistent.
   for (; i1 != e1 && i2 != e2; ++i1, ++i2, ++iR) {
-    if (*i1 == -1 || *i2 == -1) {
+    if (ShapedType::isDynamic(*i1) || ShapedType::isDynamic(*i2)) {
       // One or both dimensions is unknown. Follow TensorFlow behavior:
       // - If either dimension is greater than 1, we assume that the program is
       //   correct, and the other dimension will be broadcast to match it.
@@ -94,7 +94,7 @@ bool OpTrait::util::getBroadcastedShape(ArrayRef<int64_t> shape1,
       } else if (*i2 == 1) {
         *iR = *i1;
       } else {
-        *iR = -1;
+        *iR = ShapedType::kDynamicSize;
       }
     } else {
       if (*i1 == *i2 || *i2 == 1) {
@@ -199,7 +199,8 @@ static bool isCompatibleInferredReturnShape(ArrayRef<int64_t> inferred,
     // then it is compatible, else if the inferred dim is 1 then it is also
     // compatible. But if the existing dim is 1 and the inferred is greater than
     // 1 then flag.
-    return dim1 == dim2 || dim1 == -1 || dim2 == -1 || dim1 == 1;
+    return dim1 == dim2 || ShapedType::isDynamic(dim1) ||
+           ShapedType::isDynamic(dim2) || dim1 == 1;
   };
   if (inferred.size() != existing.size())
     return false;

diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index 013686719e8e8..fe6d6ac3b2c4d 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -335,7 +335,7 @@ RankedTensorType::verify(function_ref<InFlightDiagnostic()> emitError,
                          ArrayRef<int64_t> shape, Type elementType,
                          Attribute encoding) {
   for (int64_t s : shape)
-    if (s < -1)
+    if (s < 0 && !ShapedType::isDynamic(s))
       return emitError() << "invalid tensor dimension size";
   if (auto v = encoding.dyn_cast_or_null<VerifiableTensorEncoding>())
     if (failed(v.verifyEncoding(shape, elementType, emitError)))
@@ -656,9 +656,9 @@ LogicalResult MemRefType::verify(function_ref<InFlightDiagnostic()> emitError,
   if (!BaseMemRefType::isValidElementType(elementType))
     return emitError() << "invalid memref element type";
 
-  // Negative sizes are not allowed except for `-1` that means dynamic size.
+  // Negative sizes are not allowed except for `kDynamicSize`.
   for (int64_t s : shape)
-    if (s < -1)
+    if (s < 0 && !ShapedType::isDynamic(s))
       return emitError() << "invalid memref size";
 
   assert(layout && "missing layout specification");

diff --git a/mlir/python/mlir/dialects/_tensor_ops_ext.py b/mlir/python/mlir/dialects/_tensor_ops_ext.py
index 0f1b266034bce..51d998b6e3ceb 100644
--- a/mlir/python/mlir/dialects/_tensor_ops_ext.py
+++ b/mlir/python/mlir/dialects/_tensor_ops_ext.py
@@ -30,7 +30,7 @@ def __init__(self,
       if isinstance(s, int):
         static_sizes.append(s)
       else:
-        static_sizes.append(-1)
+        static_sizes.append(ShapedType.get_dynamic_size())
         dynamic_sizes.append(s)
     result_type = RankedTensorType.get(static_sizes, element_type)
     op = self.build_generic(

diff --git a/mlir/test/python/dialects/linalg/ops.py b/mlir/test/python/dialects/linalg/ops.py
index e14ec423e3b30..367aa331e9b20 100644
--- a/mlir/test/python/dialects/linalg/ops.py
+++ b/mlir/test/python/dialects/linalg/ops.py
@@ -23,7 +23,8 @@ def testFill():
       #  CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32
       #  CHECK-NEXT: %[[RES:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : tensor<12x?xf32>) -> tensor<12x?xf32>
       #  CHECK-NEXT: return %[[RES]] : tensor<12x?xf32>
-      @func.FuncOp.from_py_func(RankedTensorType.get((12, -1), f32))
+      @func.FuncOp.from_py_func(
+          RankedTensorType.get((12, ShapedType.get_dynamic_size()), f32))
       def fill_tensor(out):
         zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
         return linalg.fill(zero, outs=[out])
@@ -33,7 +34,8 @@ def fill_tensor(out):
       #  CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32
       #  CHECK-NEXT: linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : memref<12x?xf32>)
       #  CHECK-NEXT: return
-      @func.FuncOp.from_py_func(MemRefType.get((12, -1), f32))
+      @func.FuncOp.from_py_func(
+          MemRefType.get((12, ShapedType.get_dynamic_size()), f32))
       def fill_buffer(out):
         zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
         linalg.fill(zero, outs=[out])

diff --git a/mlir/test/python/dialects/shape.py b/mlir/test/python/dialects/shape.py
index 849b5ef8a1a92..2ebad0d8acbfa 100644
--- a/mlir/test/python/dialects/shape.py
+++ b/mlir/test/python/dialects/shape.py
@@ -20,7 +20,7 @@ def testConstShape():
     f32 = F32Type.get()
     with InsertionPoint(module.body):
       @func.FuncOp.from_py_func(
-          RankedTensorType.get((12, -1), f32))
+          RankedTensorType.get((12, ShapedType.get_dynamic_size()), f32))
       def const_shape_tensor(arg):
         return shape.ConstShapeOp(
           DenseElementsAttr.get(np.array([10, 20], dtype=np.int64), type=IndexType.get()))

diff --git a/mlir/test/python/dialects/tensor.py b/mlir/test/python/dialects/tensor.py
index cb05feb2cce99..f7f73a12ed4c5 100644
--- a/mlir/test/python/dialects/tensor.py
+++ b/mlir/test/python/dialects/tensor.py
@@ -21,7 +21,10 @@ def testDimOp():
     indexType = IndexType.get()
     with InsertionPoint(module.body):
 
-      @func.FuncOp.from_py_func(RankedTensorType.get((-1, -1), f32Type))
+      @func.FuncOp.from_py_func(
+          RankedTensorType.get(
+              (ShapedType.get_dynamic_size(), ShapedType.get_dynamic_size()),
+              f32Type))
       #      CHECK: func @tensor_static_dim
       # CHECK-SAME:     %[[ARG0:.+]]: tensor<?x?xf32>
       #  CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index

diff --git a/mlir/test/python/dialects/vector.py b/mlir/test/python/dialects/vector.py
index 8f8d7f19191cf..83c09616d56fe 100644
--- a/mlir/test/python/dialects/vector.py
+++ b/mlir/test/python/dialects/vector.py
@@ -35,7 +35,9 @@ def testTransferReadOp():
   module = Module.create()
   with InsertionPoint(module.body):
     vector_type = VectorType.get([2, 3], F32Type.get())
-    memref_type = MemRefType.get([-1, -1], F32Type.get())
+    memref_type = MemRefType.get(
+        [ShapedType.get_dynamic_size(),
+         ShapedType.get_dynamic_size()], F32Type.get())
     index_type = IndexType.get()
     mask_type = VectorType.get(vector_type.shape, IntegerType.get_signless(1))
     identity_map = AffineMap.get_identity(vector_type.rank)

diff --git a/mlir/unittests/Dialect/BroadcastShapeTest.cpp b/mlir/unittests/Dialect/BroadcastShapeTest.cpp
index de9b733878ea9..f1ab02d376c92 100644
--- a/mlir/unittests/Dialect/BroadcastShapeTest.cpp
+++ b/mlir/unittests/Dialect/BroadcastShapeTest.cpp
@@ -47,9 +47,10 @@ TEST(BroadcastShapeTest, InterleavingOnes) {
 
 TEST(BroadcastShapeTest, InterleavingUnknowns) {
   SmallVector<int64_t, 4> result;
-  ASSERT_TRUE(
-      getBroadcastedShape({1, 2, -1, -1, -1}, {-1, -1, -1, 4, 1}, result));
-  EXPECT_THAT(result, ElementsAre(-1, 2, -1, 4, -1));
+  int64_t dyn = mlir::ShapedType::kDynamicSize;
+  ASSERT_TRUE(getBroadcastedShape({1, 2, dyn, dyn, dyn}, {dyn, dyn, dyn, 4, 1},
+                                  result));
+  EXPECT_THAT(result, ElementsAre(dyn, 2, dyn, 4, dyn));
 }
 
 TEST(BroadcastShapeTest, IncompatibleLowDim) {


        

