[Mlir-commits] [mlir] d314b7d - [MLIR] ShapedType accessor minor fixes + add isDynamicDim accessor

Uday Bondhugula llvmlistbot at llvm.org
Wed Apr 8 20:18:25 PDT 2020


Author: Uday Bondhugula
Date: 2020-04-09T08:47:50+05:30
New Revision: d314b7d5ca94e60f75fe23b4b052f131880e8a2f

URL: https://github.com/llvm/llvm-project/commit/d314b7d5ca94e60f75fe23b4b052f131880e8a2f
DIFF: https://github.com/llvm/llvm-project/commit/d314b7d5ca94e60f75fe23b4b052f131880e8a2f.diff

LOG: [MLIR] ShapedType accessor minor fixes + add isDynamicDim accessor

Minor fixes and cleanup for the ShapedType accessors: use
ShapedType::kDynamicSize in isDynamic(), and add a ShapedType::isDynamicDim
accessor.

Differential Revision: https://reviews.llvm.org/D77710
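
For context, a minimal usage sketch of the new accessor (the helper below is
hypothetical and not part of this commit): instead of writing
ShapedType::isDynamic(type.getDimSize(i)), callers can now query
type.isDynamicDim(i) directly.

    #include "mlir/IR/StandardTypes.h"

    using namespace mlir;

    /// Counts the dynamic dimensions of a ranked shaped type
    /// (illustrative helper only; aborts on unranked types).
    static unsigned countDynamicDims(ShapedType type) {
      unsigned numDynamic = 0;
      for (unsigned i = 0, e = type.getRank(); i < e; ++i)
        if (type.isDynamicDim(i)) // accessor added by this commit
          ++numDynamic;
      return numDynamic;
    }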

Added: 
    

Modified: 
    mlir/include/mlir/IR/StandardTypes.h
    mlir/lib/Analysis/Utils.cpp
    mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
    mlir/lib/Dialect/Affine/IR/AffineOps.cpp
    mlir/lib/Dialect/StandardOps/IR/Ops.cpp
    mlir/lib/IR/StandardTypes.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/IR/StandardTypes.h b/mlir/include/mlir/IR/StandardTypes.h
index b36b348b89fe..cc94d27dedbb 100644
--- a/mlir/include/mlir/IR/StandardTypes.h
+++ b/mlir/include/mlir/IR/StandardTypes.h
@@ -252,7 +252,11 @@ class ShapedType : public Type {
 
   /// If this is ranked type, return the size of the specified dimension.
   /// Otherwise, abort.
-  int64_t getDimSize(int64_t i) const;
+  int64_t getDimSize(unsigned idx) const;
+
+  /// Returns true if this dimension has a dynamic size (for ranked types);
+  /// aborts for unranked types.
+  bool isDynamicDim(unsigned idx) const;
 
   /// Returns the position of the dynamic dimension relative to just the dynamic
   /// dimensions, given its `index` within the shape.
@@ -276,7 +280,9 @@ class ShapedType : public Type {
   }
 
   /// Whether the given dimension size indicates a dynamic dimension.
-  static constexpr bool isDynamic(int64_t dSize) { return dSize < 0; }
+  static constexpr bool isDynamic(int64_t dSize) {
+    return dSize == kDynamicSize;
+  }
   static constexpr bool isDynamicStrideOrOffset(int64_t dStrideOrOffset) {
     return dStrideOrOffset == kDynamicStrideOrOffset;
   }

diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 3d159f24bdf1..90b3e1faf3ef 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -330,11 +330,10 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
   if (addMemRefDimBounds) {
     auto memRefType = memref.getType().cast<MemRefType>();
     for (unsigned r = 0; r < rank; r++) {
-      cst.addConstantLowerBound(r, 0);
-      int64_t dimSize = memRefType.getDimSize(r);
-      if (ShapedType::isDynamic(dimSize))
+      cst.addConstantLowerBound(/*pos=*/r, /*lb=*/0);
+      if (memRefType.isDynamicDim(r))
         continue;
-      cst.addConstantUpperBound(r, dimSize - 1);
+      cst.addConstantUpperBound(/*pos=*/r, memRefType.getDimSize(r) - 1);
     }
   }
   cst.removeTrivialRedundancy();

diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
index d9ee9a70439b..a746af7cce61 100644
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -1888,16 +1888,15 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<DimOp> {
     OperandAdaptor<DimOp> transformed(operands);
     MemRefType type = dimOp.getOperand().getType().cast<MemRefType>();
 
-    auto shape = type.getShape();
     int64_t index = dimOp.getIndex();
     // Extract dynamic size from the memref descriptor.
-    if (ShapedType::isDynamic(shape[index]))
+    if (type.isDynamicDim(index))
       rewriter.replaceOp(op, {MemRefDescriptor(transformed.memrefOrTensor())
                                   .size(rewriter, op->getLoc(), index)});
     else
       // Use constant for static size.
-      rewriter.replaceOp(
-          op, createIndexConstant(rewriter, op->getLoc(), shape[index]));
+      rewriter.replaceOp(op, createIndexConstant(rewriter, op->getLoc(),
+                                                 type.getDimSize(index)));
     return success();
   }
 };

diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 6d0c4e9e93ad..0d03dd70039d 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -133,7 +133,7 @@ static bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp,
                                     unsigned index) {
   auto memRefType = memrefDefOp.getType();
   // Statically shaped.
-  if (!ShapedType::isDynamic(memRefType.getDimSize(index)))
+  if (!memRefType.isDynamicDim(index))
     return true;
   // Get the position of the dimension among dynamic dimensions;
   unsigned dynamicDimPos = memRefType.getDynamicDimIndex(index);

diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 7c7a8b0a0805..2f0f4b1447c0 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -1068,14 +1068,14 @@ static LogicalResult verify(DimOp op) {
 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
   // Constant fold dim when the size along the index referred to is a constant.
   auto opType = memrefOrTensor().getType();
-  int64_t indexSize = ShapedType::kDynamicSize;
+  int64_t dimSize = ShapedType::kDynamicSize;
   if (auto tensorType = opType.dyn_cast<RankedTensorType>())
-    indexSize = tensorType.getShape()[getIndex()];
+    dimSize = tensorType.getShape()[getIndex()];
   else if (auto memrefType = opType.dyn_cast<MemRefType>())
-    indexSize = memrefType.getShape()[getIndex()];
+    dimSize = memrefType.getShape()[getIndex()];
 
-  if (!ShapedType::isDynamic(indexSize))
-    return IntegerAttr::get(IndexType::get(getContext()), indexSize);
+  if (!ShapedType::isDynamic(dimSize))
+    return IntegerAttr::get(IndexType::get(getContext()), dimSize);
 
   // Fold dim to the size argument for an AllocOp/ViewOp/SubViewOp.
   auto memrefType = opType.dyn_cast<MemRefType>();
@@ -2310,13 +2310,12 @@ Value ViewOp::getDynamicOffset() {
 
 static LogicalResult verifyDynamicStrides(MemRefType memrefType,
                                           ArrayRef<int64_t> strides) {
-  ArrayRef<int64_t> shape = memrefType.getShape();
   unsigned rank = memrefType.getRank();
   assert(rank == strides.size());
   bool dynamicStrides = false;
   for (int i = rank - 2; i >= 0; --i) {
     // If size at dim 'i + 1' is dynamic, set the 'dynamicStrides' flag.
-    if (ShapedType::isDynamic(shape[i + 1]))
+    if (memrefType.isDynamicDim(i + 1))
       dynamicStrides = true;
     // If stride at dim 'i' is not dynamic, return error.
     if (dynamicStrides && strides[i] != MemRefType::getDynamicStrideOrOffset())

diff --git a/mlir/lib/IR/StandardTypes.cpp b/mlir/lib/IR/StandardTypes.cpp
index 1e7d9f38a2ce..cff65e734752 100644
--- a/mlir/lib/IR/StandardTypes.cpp
+++ b/mlir/lib/IR/StandardTypes.cpp
@@ -184,9 +184,14 @@ int64_t ShapedType::getRank() const { return getShape().size(); }
 
 bool ShapedType::hasRank() const { return !isa<UnrankedTensorType>(); }
 
-int64_t ShapedType::getDimSize(int64_t i) const {
-  assert(i >= 0 && i < getRank() && "invalid index for shaped type");
-  return getShape()[i];
+int64_t ShapedType::getDimSize(unsigned idx) const {
+  assert(idx < getRank() && "invalid index for shaped type");
+  return getShape()[idx];
+}
+
+bool ShapedType::isDynamicDim(unsigned idx) const {
+  assert(idx < getRank() && "invalid index for shaped type");
+  return isDynamic(getShape()[idx]);
 }
 
 unsigned ShapedType::getDynamicDimIndex(unsigned index) const {
