[Mlir-commits] [mlir] 6596b0d - [mlir][tensor] Clean up tensor::DimOp usage

Matthias Springer llvmlistbot at llvm.org
Thu Jun 22 01:56:25 PDT 2023


Author: Matthias Springer
Date: 2023-06-22T10:56:17+02:00
New Revision: 6596b0dde85888117bd230f64906a8c4de968b87

URL: https://github.com/llvm/llvm-project/commit/6596b0dde85888117bd230f64906a8c4de968b87
DIFF: https://github.com/llvm/llvm-project/commit/6596b0dde85888117bd230f64906a8c4de968b87.diff

LOG: [mlir][tensor] Clean up tensor::DimOp usage

* Remove duplicate functions: `tensor::getMixedSize` and `tensor::getMixedSizes` should be used instead.
* Use `tensor::getMixedSize` instead of `createOrFold<tensor::DimOp>`. This is more efficient: `createOrFold` creates an op and immediately tries to fold it, whereas for a static dimension size an attribute can be used directly and no op needs to be created (see the sketch below).
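
Editor's note: a minimal before/after sketch of the migration this commit applies (a sketch only; `rewriter`, `loc`, and `src` are placeholder names for a rewriter, a location, and any ranked tensor value):

    // Before: unconditionally creates a tensor.dim op, then tries to fold it.
    Value oldSize = rewriter.createOrFold<tensor::DimOp>(loc, src, /*dim=*/0);

    // After: for a static dimension no op is created at all; the size is
    // returned as an index attribute. A tensor.dim op is created (and a fold
    // attempted) only when the dimension is dynamic.
    OpFoldResult newSize = tensor::getMixedSize(rewriter, loc, src, /*dim=*/0);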

Differential Revision: https://reviews.llvm.org/D153332

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
    mlir/include/mlir/Dialect/Tensor/Utils/Utils.h
    mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
    mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
    mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
    mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
    mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
    mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
    mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
    mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
    mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
    mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
    mlir/lib/Dialect/Tensor/Utils/Utils.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h b/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
index c64959bfbd74a..06642adda42b3 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
+++ b/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
@@ -113,6 +113,10 @@ bool canFoldIntoProducerOp(CastOp castOp);
 /// that can be folded.
 LogicalResult foldTensorCast(Operation *op);
 
+/// Return the dimension of the given tensor value.
+OpFoldResult getMixedSize(OpBuilder &builder, Location loc, Value value,
+                          int64_t dim);
+
 /// Return the dimensions of the given tensor value.
 SmallVector<OpFoldResult> getMixedSizes(OpBuilder &builder, Location loc,
                                         Value value);

diff --git a/mlir/include/mlir/Dialect/Tensor/Utils/Utils.h b/mlir/include/mlir/Dialect/Tensor/Utils/Utils.h
index c610b5d0f737c..bdb988bd46315 100644
--- a/mlir/include/mlir/Dialect/Tensor/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Tensor/Utils/Utils.h
@@ -26,16 +26,6 @@ PadOp createPadHighOp(RankedTensorType type, Value source, Value pad,
 SmallVector<Value> createDynamicDimValues(OpBuilder &b, Location loc,
                                           Value rankedTensor);
 
-// Returns the tensor extent along dimension `dim` if `rankedTensor` is of
-// `RankedTensorType`. Returns `failure()` otherwise.
-FailureOr<OpFoldResult> createDimValue(OpBuilder &b, Location loc,
-                                       Value rankedTensor, int64_t dim);
-
-// Creates dim ops or constant ops for each dimension of the ranked tensor
-// argument and returns these as values.
-SmallVector<OpFoldResult> createDimValues(OpBuilder &b, Location loc,
-                                          Value rankedTensor);
-
 /// Returns the transposed `rankedTensorType` if `transposeVector` is non-empty.
 /// Fail if `transposeVector` is not a permutation matching the tensor rank.
 FailureOr<RankedTensorType>

diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index f30aa0b1521a8..3c1a63bf8e21a 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -1884,8 +1884,8 @@ class GatherConverter : public OpConversionPattern<tosa::GatherOp> {
     llvm::SmallVector<Value> results;
 
     auto addDynamicDimension = [&](Value source, int64_t dim) {
-      auto dynamicDim = tensor::createDimValue(builder, loc, source, dim);
-      if (auto dimValue = llvm::dyn_cast_if_present<Value>(dynamicDim.value()))
+      auto sz = tensor::getMixedSize(builder, loc, source, dim);
+      if (auto dimValue = llvm::dyn_cast_if_present<Value>(sz))
         results.push_back(dimValue);
     };
 

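Background for the hunk above (not part of the patch): `OpFoldResult` is a `PointerUnion<Attribute, Value>`, so a mixed size is either a constant index attribute (static dim) or an SSA value (dynamic dim). The updated lambda keeps only the dynamic case:

    // dyn_cast_if_present<Value> is non-null only for a dynamic size; a
    // static size comes back as an attribute and is skipped.
    auto sz = tensor::getMixedSize(builder, loc, source, dim);
    if (auto dimValue = llvm::dyn_cast_if_present<Value>(sz))
      results.push_back(dimValue);
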
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 0b2b006bfc365..62988813b4573 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -367,8 +367,8 @@ struct ConcatConverter : public OpConversionPattern<tosa::ConcatOp> {
 
     SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
     SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
-    SmallVector<OpFoldResult> sizes = tensor::createDimValues(
-        rewriter, op.getLoc(), adaptor.getOperands()[0]);
+    SmallVector<OpFoldResult> sizes =
+        tensor::getMixedSizes(rewriter, op.getLoc(), adaptor.getOperands()[0]);
 
     // Pre-compute the offsets along the axis dimension.
     // The axisOffsets will be of size rank + 1, where the last value
@@ -403,7 +403,7 @@ struct ConcatConverter : public OpConversionPattern<tosa::ConcatOp> {
         loc, resultType.getShape(), resultType.getElementType(), dynDims);
 
     for (auto [arg, offset] : llvm::zip(adaptor.getOperands(), axisOffsets)) {
-      auto sizes = tensor::createDimValues(rewriter, op.getLoc(), arg);
+      auto sizes = tensor::getMixedSizes(rewriter, op.getLoc(), arg);
       offsets[axis] = offset;
       result = rewriter.createOrFold<tensor::InsertSliceOp>(
           loc, arg, result, offsets, sizes, strides);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index f23830699aeb9..894036a535302 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -310,7 +310,7 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
       rewriter.setInsertionPointAfterValue(op->get());
       auto elemType = cast<ShapedType>(op->get().getType()).getElementType();
       auto empty = rewriter.create<tensor::EmptyOp>(
-          loc, tensor::createDimValues(rewriter, loc, op->get()), elemType);
+          loc, tensor::getMixedSizes(rewriter, loc, op->get()), elemType);
 
       auto [start, end] = genericOp.getDpsInitsPositionRange();
       newOutputOperands[op->getOperandNumber() - start] = empty.getResult();

diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 528fc477422db..581e7b0a8ea86 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -1779,16 +1779,10 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
         if (definingOp)
           continue;
         modifiedOutput = true;
-        SmallVector<Value> dynamicDims;
-        for (const auto &dim : llvm::enumerate(operandType.getShape())) {
-          if (dim.value() != ShapedType::kDynamic)
-            continue;
-          dynamicDims.push_back(rewriter.createOrFold<tensor::DimOp>(
-              loc, operandVal, dim.index()));
-        }
+        SmallVector<OpFoldResult> mixedSizes =
+            tensor::getMixedSizes(rewriter, loc, operandVal);
         Value emptyTensor = rewriter.create<tensor::EmptyOp>(
-            loc, operandType.getShape(), operandType.getElementType(),
-            dynamicDims);
+            loc, mixedSizes, operandType.getElementType());
         op->setOperand(opOperand->getOperandNumber(), emptyTensor);
       }
     }

diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index 36f13fa64dccb..54f0bd249c3cf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -291,7 +291,7 @@ struct LinalgOpPartialReductionInterface
       int64_t dim = oldShape[oldIdx];
       newOutputShape.push_back(dim);
       if (ShapedType::isDynamic(dim))
-        dynamicDims.push_back(b.createOrFold<tensor::DimOp>(
+        dynamicDims.push_back(b.create<tensor::DimOp>(
             loc, linalgOp.getDpsInitOperand(0)->get(), oldIdx));
     }
     Value emptyTensor = b.create<tensor::EmptyOp>(

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 165c350ecf88a..5fd9228233532 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -512,12 +512,10 @@ FailureOr<LowerPackResult> linalg::lowerPack(RewriterBase &rewriter,
        llvm::zip_equal(packOp.getInnerDimsPos(), packOp.getMixedTiles())) {
     int outerPos =
         packedToStripMinedShapePerm[packingMetadata.outerPositions[pos]];
-    OpFoldResult origSize = rewriter.createOrFold<tensor::DimOp>(
-        loc, packOp.getSource(),
-        rewriter.create<arith::ConstantIndexOp>(loc, pos));
-    OpFoldResult outerSize = rewriter.createOrFold<tensor::DimOp>(
-        loc, packOp.getDest(),
-        rewriter.create<arith::ConstantIndexOp>(loc, outerPos));
+    OpFoldResult origSize =
+        tensor::getMixedSize(rewriter, loc, packOp.getSource(), pos);
+    OpFoldResult outerSize =
+        tensor::getMixedSize(rewriter, loc, packOp.getDest(), outerPos);
     AffineExpr s0, d0, d1;
     bindDims(rewriter.getContext(), d0, d1);
     bindSymbols(rewriter.getContext(), s0);
@@ -1132,8 +1130,8 @@ GeneralizePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
   SmallVector<int64_t> staticSizes;
   for (unsigned dim = 0; dim < resultType.getRank(); ++dim) {
     if (resultType.isDynamicDim(dim)) {
-      auto srcSize = rewriter.createOrFold<tensor::DimOp>(
-          padOp.getLoc(), padOp.getSource(), dim);
+      auto srcSize = getIdxValue(tensor::getMixedSize(rewriter, padOp.getLoc(),
+                                                      padOp.getSource(), dim));
       // Add low and high padding value.
       auto plusLow = rewriter.createOrFold<arith::AddIOp>(
           padOp.getLoc(), srcSize, getIdxValue(padOp.getMixedLowPad()[dim]));
@@ -1157,15 +1155,8 @@ GeneralizePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
   // for copying the PadOp source.
   auto sourceType = padOp.getSourceType();
   // Compute size of source of tensor::PadOp.
-  SmallVector<OpFoldResult> srcSizes;
-  for (unsigned dim = 0; dim < sourceType.getRank(); ++dim) {
-    if (sourceType.isDynamicDim(dim)) {
-      srcSizes.push_back(rewriter.createOrFold<tensor::DimOp>(
-          padOp.getLoc(), padOp.getSource(), dim));
-    } else {
-      srcSizes.push_back(rewriter.getIndexAttr(sourceType.getDimSize(dim)));
-    }
-  }
+  SmallVector<OpFoldResult> srcSizes =
+      tensor::getMixedSizes(rewriter, padOp.getLoc(), padOp.getSource());
   // Strides of InsertSliceOp are all 1.
   SmallVector<OpFoldResult> strides(sourceType.getRank(),
                                     rewriter.getIndexAttr(1));
@@ -1459,8 +1450,8 @@ LogicalResult GeneralizeOuterUnitDimsUnPackOpPattern::matchAndRewrite(
   ArrayRef<int64_t> destShape = unpackOp.getDestType().getShape();
   for (auto i : llvm::seq<unsigned>(0, destRank)) {
     if (dimAndTileMapping.count(i) || destShape[i] != 1)
-      tileSizes.push_back(getAsOpFoldResult(
-          rewriter.createOrFold<tensor::DimOp>(loc, unpackOp.getDest(), i)));
+      tileSizes.push_back(
+          tensor::getMixedSize(rewriter, loc, unpackOp.getDest(), i));
   }
 
   auto partialTile = rewriter.create<tensor::ExtractSliceOp>(

diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
index 224bec3b26d29..3f8fe9a23f55e 100644
--- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -455,7 +455,7 @@ mlir::scf::tileReductionUsingScf(RewriterBase &b,
   SmallVector<OpFoldResult> resultSizesList;
   for (size_t i = 0; i < offsets.size(); i++)
     resultSizesList.push_back(
-        b.createOrFold<tensor::DimOp>(loc, parallelOp->getResult(0), i));
+        tensor::getMixedSize(b, loc, parallelOp->getResult(0), i));
   SmallVector<OpFoldResult> outOffsets(offsets.size(), b.getIndexAttr(0));
   SmallVector<Value> replacements = yieldTiledValues(
       b, (*identityTensor)->getResults(), parallelOp->getResults(), outOffsets,

diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 283f1be6aa793..54690aa687509 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -46,18 +46,22 @@ Operation *TensorDialect::materializeConstant(OpBuilder &builder,
   return nullptr;
 }
 
+OpFoldResult tensor::getMixedSize(OpBuilder &builder, Location loc, Value value,
+                                  int64_t dim) {
+  auto tensorType = llvm::cast<RankedTensorType>(value.getType());
+  SmallVector<OpFoldResult> result;
+  if (tensorType.isDynamicDim(dim))
+    return builder.createOrFold<tensor::DimOp>(loc, value, dim);
+
+  return builder.getIndexAttr(tensorType.getDimSize(dim));
+}
+
 SmallVector<OpFoldResult> tensor::getMixedSizes(OpBuilder &builder,
                                                 Location loc, Value value) {
   auto tensorType = llvm::cast<RankedTensorType>(value.getType());
   SmallVector<OpFoldResult> result;
-  for (int64_t i = 0; i < tensorType.getRank(); ++i) {
-    if (tensorType.isDynamicDim(i)) {
-      Value size = builder.create<tensor::DimOp>(loc, value, i);
-      result.push_back(size);
-    } else {
-      result.push_back(builder.getIndexAttr(tensorType.getDimSize(i)));
-    }
-  }
+  for (int64_t i = 0; i < tensorType.getRank(); ++i)
+    result.push_back(getMixedSize(builder, loc, value, i));
   return result;
 }
 
@@ -2283,15 +2287,7 @@ OpFoldResult InsertSliceOp::fold(FoldAdaptor) {
 LogicalResult InsertSliceOp::reifyResultShapes(
     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
   reifiedReturnShapes.resize(1, SmallVector<OpFoldResult>(getType().getRank()));
-  for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
-    if (getType().isDynamicDim(dim)) {
-      reifiedReturnShapes[0][dim] =
-          builder.createOrFold<tensor::DimOp>(getLoc(), getDest(), dim);
-    } else {
-      reifiedReturnShapes[0][dim] =
-          builder.getIndexAttr(getType().getDimSize(dim));
-    }
-  }
+  reifiedReturnShapes[0] = tensor::getMixedSizes(builder, getLoc(), getDest());
   return success();
 }
 
@@ -3254,16 +3250,8 @@ reifyResultShapesImpl(OpTy op, OpBuilder &builder,
                 "applies to only pack or unpack operations");
   int64_t destRank = op.getDestRank();
   reifiedReturnShapes.resize(1, SmallVector<OpFoldResult>(destRank));
-  ShapedType resultType = llvm::cast<ShapedType>(op.getResult().getType());
-  for (auto dim : llvm::seq<int64_t>(0, destRank)) {
-    if (resultType.isDynamicDim(dim)) {
-      reifiedReturnShapes[0][dim] =
-          builder.createOrFold<tensor::DimOp>(op.getLoc(), op.getDest(), dim);
-    } else {
-      reifiedReturnShapes[0][dim] =
-          builder.getIndexAttr(resultType.getDimSize(dim));
-    }
-  }
+  reifiedReturnShapes[0] =
+      tensor::getMixedSizes(builder, op.getLoc(), op.getDest());
   return success();
 }
 

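To illustrate the behavior of the refactored helpers above (a hypothetical snippet; assumes a `builder`, a `loc`, and a value `t` of type `tensor<4x?xf32>` in scope):

    SmallVector<OpFoldResult> sizes = tensor::getMixedSizes(builder, loc, t);
    // sizes[0] == builder.getIndexAttr(4): no op is created for the static
    // dimension.
    // sizes[1] is a Value: the (possibly folded) result of a tensor.dim op
    // querying dimension 1 of %t.
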
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
index 52ba083db160f..56cfdfcf0b8b9 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -134,7 +134,7 @@ struct PackOpTiling
     DenseMap<int64_t, OpFoldResult> dimAndTileMapping =
         packOp.getDimAndTileMapping();
     SmallVector<OpFoldResult> srcDimValues =
-        tensor::createDimValues(b, loc, packOp.getSource());
+        tensor::getMixedSizes(b, loc, packOp.getSource());
     SmallVector<OpFoldResult> inputIndices, inputSizes;
     for (auto dim : llvm::seq<int64_t>(0, inputRank)) {
       using AV = affine::AffineValueExpr;
@@ -502,8 +502,7 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
     bool hasHighPad = !isConstantIntValue(high, 0);
     auto offset = offsets[dim];
     auto length = sizes[dim];
-    auto srcSize =
-        tensor::createDimValue(b, loc, padOp.getSource(), dim).value();
+    auto srcSize = tensor::getMixedSize(b, loc, padOp.getSource(), dim);
 
     // The new amount of low padding is `low - offset`. Except for the case
     // where none of the low padding is read. In that case, the new amount of

diff --git a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
index b7f5218e8f6a9..e0acaee9f6626 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
@@ -26,27 +26,6 @@ using namespace mlir;
 using namespace mlir::affine;
 using namespace mlir::tensor;
 
-/// Get the dimension size of a value of RankedTensor type at the
-static OpFoldResult getShapeDimSize(OpBuilder &b, Location loc,
-                                    Value rankedTensor, int64_t dimIdx) {
-  RankedTensorType tensorType = cast<RankedTensorType>(rankedTensor.getType());
-  if (!tensorType.isDynamicDim(dimIdx)) {
-    return b.getIndexAttr(tensorType.getDimSize(dimIdx));
-  }
-  Value idxValue = b.create<arith::ConstantIndexOp>(loc, dimIdx);
-  return b.createOrFold<tensor::DimOp>(loc, rankedTensor, idxValue);
-}
-
-/// Get all the dimension sizes of a value of RankedTensor type.
-static SmallVector<OpFoldResult> getShapeDimSizes(OpBuilder &b, Location loc,
-                                                  Value rankedTensor) {
-  SmallVector<OpFoldResult> dimSizes;
-  RankedTensorType tensorType = cast<RankedTensorType>(rankedTensor.getType());
-  for (unsigned i = 0; i < tensorType.getRank(); i++)
-    dimSizes.push_back(getShapeDimSize(b, loc, rankedTensor, i));
-  return dimSizes;
-}
-
 /// A tuple that represents (dimension number, dimension value).
 using DimAndIndex = std::tuple<unsigned, Value>;
 
@@ -123,7 +102,8 @@ tensor::ExtractSliceFromCollapseHelper::create(OpBuilder &b,
   llvm::SmallBitVector slicedDimensions =
       getSlicedDimensions(collapseShapeOutputShape, sliceParams);
 
-  auto collapseShapeInputShape = getShapeDimSizes(b, op.getLoc(), op.getSrc());
+  auto collapseShapeInputShape =
+      tensor::getMixedSizes(b, op.getLoc(), op.getSrc());
 
   SmallVector<Value> tileSizes;
   for (unsigned i = 0; i < sliceParams.size(); i++) {
@@ -193,7 +173,7 @@ tensor::simplifyCollapseShapeWithRankReducingExtractSlice(
   auto one = rewriter.getIndexAttr(1);
   SmallVector<OpFoldResult> offsets(sourceType.getRank(), zero);
   SmallVector<OpFoldResult> sizes =
-      getShapeDimSizes(rewriter, op.getLoc(), op.getSrc());
+      tensor::getMixedSizes(rewriter, op.getLoc(), op.getSrc());
   SmallVector<OpFoldResult> strides(sourceType.getRank(), one);
   auto sliceOp = rewriter.create<tensor::ExtractSliceOp>(
       op.getLoc(), info->sliceResultType, op.getSrc(), offsets, sizes, strides);

diff --git a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
index 4d5404a3be2dd..3ea81d44a35bf 100644
--- a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
@@ -34,9 +34,9 @@ PadOp mlir::tensor::createPadHighOp(RankedTensorType type, Value source,
     // Compute the padding width.
     AffineExpr d0;
     bindDims(b.getContext(), d0);
-    auto dimOp = b.createOrFold<tensor::DimOp>(loc, source, en.index());
+    OpFoldResult sz = tensor::getMixedSize(b, loc, source, en.index());
     high[en.index()] =
-        affine::makeComposedAffineApply(b, loc, en.value() - d0, {dimOp})
+        affine::makeComposedAffineApply(b, loc, en.value() - d0, {sz})
             .getResult();
   }
   return b.create<PadOp>(loc, type, source, low, high, pad, nofold);
@@ -55,35 +55,6 @@ SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
   return dynamicDims;
 }
 
-FailureOr<OpFoldResult> mlir::tensor::createDimValue(OpBuilder &b, Location loc,
-                                                     Value rankedTensor,
-                                                     int64_t dim) {
-  auto tensorTy = dyn_cast<RankedTensorType>(rankedTensor.getType());
-  if (!tensorTy)
-    return failure();
-  auto shape = tensorTy.getShape();
-  if (dim >= static_cast<int64_t>(shape.size()))
-    return failure();
-  if (ShapedType::isDynamic(shape[dim]))
-    return OpFoldResult(b.createOrFold<tensor::DimOp>(loc, rankedTensor, dim));
-  return OpFoldResult(b.getIndexAttr(shape[dim]));
-}
-
-SmallVector<OpFoldResult>
-mlir::tensor::createDimValues(OpBuilder &b, Location loc, Value rankedTensor) {
-  auto tensorTy = cast<RankedTensorType>(rankedTensor.getType());
-  SmallVector<OpFoldResult> dims;
-  for (const auto &en : llvm::enumerate(tensorTy.getShape())) {
-    if (ShapedType::isDynamic(en.value())) {
-      dims.push_back(
-          b.createOrFold<tensor::DimOp>(loc, rankedTensor, en.index()));
-    } else {
-      dims.push_back(b.getIndexAttr(en.value()));
-    }
-  }
-  return dims;
-}
-
 FailureOr<RankedTensorType>
 mlir::tensor::computeTransposedType(RankedTensorType rankedTensorType,
                                     ArrayRef<int64_t> transposeVector) {

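One behavioral difference worth noting: the removed `createDimValue` returned `failure()` for an unranked tensor or an out-of-range dimension, whereas `tensor::getMixedSize` `llvm::cast`s to `RankedTensorType` and asserts instead. All call sites updated here already guarantee a ranked tensor (the `GatherConverter` above called `.value()` unconditionally anyway). A hypothetical guard for callers that cannot make that guarantee, re-creating the removed checks on top of the new helper:

    static FailureOr<OpFoldResult> maybeMixedSize(OpBuilder &b, Location loc,
                                                  Value v, int64_t dim) {
      // Not part of the patch: mirrors the checks createDimValue used to do.
      auto rankedTy = dyn_cast<RankedTensorType>(v.getType());
      if (!rankedTy || dim < 0 || dim >= rankedTy.getRank())
        return failure();
      return tensor::getMixedSize(b, loc, v, dim);
    }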