[Mlir-commits] [mlir] b0d02b6 - [MLIR] Minor cleanup for Shape dialect.

Rahul Joshi llvmlistbot at llvm.org
Wed Dec 9 14:22:22 PST 2020


Author: Rahul Joshi
Date: 2020-12-09T14:21:35-08:00
New Revision: b0d02b698b94d2fc5f7fbd430f5e9d3b032f8523

URL: https://github.com/llvm/llvm-project/commit/b0d02b698b94d2fc5f7fbd430f5e9d3b032f8523
DIFF: https://github.com/llvm/llvm-project/commit/b0d02b698b94d2fc5f7fbd430f5e9d3b032f8523.diff

LOG: [MLIR] Minor cleanup for Shape dialect.

- Remove some unused types from the Shape dialect.
- Fix from_extent_tensor to only allow 1D index tensors.
- Fix assuming_yield to only allow shape.assuming as the parent op.
- Fix some documentation typos and reword some descriptions.

Differential Revision: https://reviews.llvm.org/D92901

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Shape/IR/Shape.h
    mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
    mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
    mlir/lib/Dialect/Shape/IR/Shape.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Shape/IR/Shape.h b/mlir/include/mlir/Dialect/Shape/IR/Shape.h
index eab3c6f67ca0..db2862141ea9 100644
--- a/mlir/include/mlir/Dialect/Shape/IR/Shape.h
+++ b/mlir/include/mlir/Dialect/Shape/IR/Shape.h
@@ -31,18 +31,6 @@ namespace shape {
 /// Alias type for extent tensors.
 RankedTensorType getExtentTensorType(MLIRContext *ctx);
 
-/// The component type corresponding to shape, element type and attribute.
-class ComponentType : public Type::TypeBase<ComponentType, Type, TypeStorage> {
-public:
-  using Base::Base;
-};
-
-/// The element type of the shaped type.
-class ElementType : public Type::TypeBase<ElementType, Type, TypeStorage> {
-public:
-  using Base::Base;
-};
-
 /// The shape descriptor type represents rank and dimension sizes.
 class ShapeType : public Type::TypeBase<ShapeType, Type, TypeStorage> {
 public:

diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
index c9103a2b8b63..a7868e74c65f 100644
--- a/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
+++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
@@ -39,29 +39,11 @@ def ShapeDialect : Dialect {
   let hasConstantMaterializer = 1;
 }
 
-def Shape_ComponentType : DialectType<ShapeDialect,
-    CPred<"$_self.isa<::mlir::shape::ComponentType>()">, "component type">,
-    BuildableType<"$_builder.getType<::mlir::shape::ComponentType>()"> {
-  let typeDescription = [{
-    `shape.component_type` represents the tuple of shape, element type and
-    attribute.
-  }];
-}
-
-def Shape_ElementType : DialectType<ShapeDialect,
-    CPred<"$_self.isa<::mlir::shape::ElementType>()">, "element type">,
-    BuildableType<"$_builder.getType<::mlir::shape::ElementType>()"> {
-  let typeDescription = [{
-    `shape.element_type` represents the element type of the ShapedType. It may
-    be unknown, error or regular element type supported by ShapedType.
-  }];
-}
-
 def Shape_ShapeType : DialectType<ShapeDialect,
     CPred<"$_self.isa<::mlir::shape::ShapeType>()">, "shape">,
     BuildableType<"$_builder.getType<::mlir::shape::ShapeType>()"> {
   let typeDescription = [{
-    `shape.type` represents either an unranked shape, a ranked shape with
+    `shape.shape` represents either an unranked shape, a ranked shape with
     possibly unknown dimensions or an invalid shape. The rank is of type
     `shape.size` and, if rank is known, the extent is a 1D tensor of type
     `shape.size`.
@@ -96,12 +78,12 @@ def Shape_ValueShapeType : DialectType<ShapeDialect,
   let typeDescription = [{
     `shape.value_shape` represents the value produced by an operation (this
     corresponds to `Value` in the compiler) and a shape. Conceptually this is a
-    tuple of a value (potentially unknown) and `shape.type`. The value and shape
-    can either or both be unknown. If both the `value` and `shape` are known,
-    then the shape of `value` is conformant with `shape`. That is, the shape of
-    the value conforms to the shape of the ValueShape, so that if we have
-    ValueShape `(value, shape)` then `join(shape_of(value), shape)` would be
-    error free and in particular it means that if both are statically known,
+    tuple of a value (potentially unknown) and `shape.shape`. The value and
+    shape can either or both be unknown. If both the `value` and `shape` are
+    known, then the shape of `value` is conformant with `shape`. That is, the
+    shape of the value conforms to the shape of the ValueShape, so that if we
+    have ValueShape `(value, shape)` then `join(shape_of(value), shape)` would
+    be error free and in particular it means that if both are statically known,
     then they are equal.
   }];
 }
@@ -112,8 +94,8 @@ def Shape_ExtentTensorType :
                   "$_builder.getType<::mlir::IndexType>())"> {
   let typeDescription = [{
     The extent tensor is a tensor of rank one with arbitrarily many index
-    elements. Like `!shape.shape`, it is used to represent shapes with the
-    difference that it is guaranteed to be error-free.
+    elements (tensor<?xindex>). Like `!shape.shape`, it is used to represent
+    shapes with the difference that it is guaranteed to be error-free.
   }];
 }
 

diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
index 552de7e78f91..0cbb910e062c 100644
--- a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
+++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
@@ -34,7 +34,9 @@ def Shape_AddOp : Shape_Op<"add", [Commutative, NoSideEffect]> {
     Adds two sizes or indices. If either operand is an error it will be
     propagated to the result. The operands can be of type `size` or `index`. If
     at least one of the operands can hold an error, i.e. if it is of type `size`,
-    then also the result must be of type `size`.
+    the result must be of type `size`. If error propagation is not possible
+    because both operands are of type `index` then the result may be of type
+    `size` or `index`.
   }];
 
   let arguments = (ins Shape_SizeOrIndexType:$lhs, Shape_SizeOrIndexType:$rhs);
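
To make the reworded typing rule concrete, here is a small hedged sketch (the SSA names `%size` and `%idx` are hypothetical, and the `operands : operand-types -> result-type` annotation follows the form used elsewhere in the dialect docs; the exact assembly may differ in detail):

```mlir
// `!shape.size` can hold an error, so it is "sticky": one `size` operand
// forces a `size` result.
%0 = shape.add %size, %idx : !shape.size, index -> !shape.size
// Two `index` operands cannot hold an error; per the reworded rule the
// result may then be either type.
%1 = shape.add %idx, %idx : index, index -> index
%2 = shape.add %idx, %idx : index, index -> !shape.size
```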
@@ -177,7 +179,7 @@ def Shape_FromExtentTensorOp : Shape_Op<"from_extent_tensor", [NoSideEffect]> {
     extents match the values of the elements.
   }];
 
-  let arguments = (ins IndexTensor:$input);
+  let arguments = (ins 1DTensorOf<[Index]>:$input);
   let results = (outs Shape_ShapeType:$result);
 
   let assemblyFormat = "$input attr-dict `:` type($input)";
@@ -247,7 +249,7 @@ def Shape_GetExtentOp : Shape_Op<"get_extent", [NoSideEffect]> {
   let summary = "Gets the specified extent from a shape or extent tensor";
   let description = [{
     Gets the extent indexed by `dim` from the `shape` operand. If the shape is
-    an error then it returns an error size.
+    an error then it returns an invalid size.
   }];
   let arguments = (ins Shape_ShapeOrExtentTensorType:$shape,
                        Shape_SizeOrIndexType:$dim);
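
For illustration, a hedged sketch of the error semantics described above (value names are hypothetical and the exact assembly may differ slightly from this form):

```mlir
// On a `!shape.shape`, the result is a `!shape.size`, which can represent
// the invalid size if `%shape` is an error.
%e0 = shape.get_extent %shape, %dim : !shape.shape, !shape.size -> !shape.size
// On an extent tensor indexed with a plain `index`, no error can occur and
// an `index` result suffices.
%e1 = shape.get_extent %tensor, %c0 : tensor<?xindex>, index -> index
```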
@@ -289,7 +291,7 @@ def Shape_IndexToSizeOp : Shape_Op<"index_to_size", [NoSideEffect]> {
 }
 
 def Shape_JoinOp : Shape_Op<"join", [Commutative]> {
-  let summary = "Returns the least general shape.size of its operands";
+  let summary = "Returns the least general shape.shape of its operands";
   let description = [{
     An operation that computes the least general shape of input operands.
     This effectively asserts that corresponding static dimensions are equal.
@@ -327,9 +329,9 @@ def Shape_MulOp : Shape_Op<"mul", [Commutative, NoSideEffect]> {
     Multiplies two sizes or indices. If either operand is an error it will be
     propagated to the result. The operands can be of type `size` or `index`. If
     at least one of the operands can hold an error, i.e. if it is of type `size`,
-    then also the result must be of type `size`. If error propagation is not
-    possible because both operands are of type `index` then the result must also
-    be of type `index`.
+    the result must be of type `size`. If error propagation is not possible
+    because both operands are of type `index` then the result may be of type
+    `size` or `index`.
   }];
 
   let arguments = (ins Shape_SizeOrIndexType:$lhs, Shape_SizeOrIndexType:$rhs);
@@ -369,23 +371,22 @@ def Shape_ReduceOp : Shape_Op<"reduce",
   let summary = "Returns an expression reduced over a shape or extent tensor";
   let description = [{
     An operation that takes as input a shape or extent tensor, and a number of
-    initial values. This operation has a region/function that is applied
-    repeatedly for every extent of the input. Starting with the initial values,
-    the individual extents are then aggregated as defined by the associated
-    region.
+    initial values. This operation has a region that is applied repeatedly for
+    every extent of the input. Starting with the initial values, the individual
+    extents are then aggregated as defined by the associated region.
 
     Conceptually this op performs the following reduction:
 
     ```
     res[] = init;
     for (int i = 0, i < shape.rank(); i++) {
-      res = fn(i, shape[i], res[0], ..., res[n]);
+      res = reduce(i, shape[i], res[0], ..., res[n]);
     }
     ```
 
-    Where `fn` is provided by the user and the result of the reduce op is the
-    last computed output of the reduce function. As an example, computing the
-    number of elements can be defined as follows:
+    Where `reduce` represents the region attached and the result of the reduce
+    op is the last computed output of the reduce region. As an example, the
+    number of elements can be computed as follows:
 
     ```mlir
     func @reduce(%shape : !shape.shape, %init : !shape.size) -> !shape.size {
@@ -669,13 +670,13 @@ def Shape_AssumingOp : Shape_Op<"assuming",
 }
 
 def Shape_AssumingYieldOp : Shape_Op<"assuming_yield",
-                                     [NoSideEffect, ReturnLike, Terminator]> {
+       [NoSideEffect, ReturnLike, Terminator, HasParent<"AssumingOp">]> {
   let summary = "Yield operation";
   let description = [{
-    This yield operation represents a return operation within the assert_and_exec
-    region. The operation takes variable number of operands and produces no
-    results. The operand number and types must match the return signature of
-    the region that contains the operation.
+    This yield operation represents a return operation within the
+    `shape.assuming` operation region. The operation takes variable number of
+    operands and produces no results. The operand number and types must match
+    the number and types of parent `shape.assuming` results.
   }];
 
   let arguments = (ins Variadic<AnyType>:$operands);
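
A minimal sketch of the nesting that the new `HasParent<"AssumingOp">` constraint enforces (the witness and result names are hypothetical; the region syntax is sketched from the dialect documentation):

```mlir
%w = shape.const_witness true
%r = shape.assuming %w -> !shape.size {
  %sz = shape.const_size 4
  // Only valid directly inside a `shape.assuming` region; the operand
  // types must match the parent op's result types.
  shape.assuming_yield %sz : !shape.size
}
```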
@@ -742,7 +743,7 @@ def Shape_ConstWitnessOp : Shape_Op<"const_witness", [ConstantLike, NoSideEffect
 
   ```mlir
   %0 = shape.const_shape [1,2,3]
-  %1 = shape.const_shape [1, 2, 3]
+  %1 = shape.const_shape [1,2,3]
   %w0 = shape.cstr_eq(%0, %1) // Can be folded to "const_witness true"
   %w1 = shape.const_witness true
   %w2 = shape.assuming_all(%w0, %w2) // Can be folded to "const_witness true"

diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index 44f897cbf505..c71360cdaba5 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -31,10 +31,9 @@ RankedTensorType shape::getExtentTensorType(MLIRContext *ctx) {
 }
 
 static bool isErrorPropagationPossible(TypeRange operandTypes) {
-  for (Type ty : operandTypes)
-    if (ty.isa<SizeType>() || ty.isa<ShapeType>() || ty.isa<ValueShapeType>())
-      return true;
-  return false;
+  return llvm::any_of(operandTypes, [](Type ty) {
+    return ty.isa<SizeType, ShapeType, ValueShapeType>();
+  });
 }
 
 static LogicalResult verifySizeOrIndexOp(Operation *op) {
@@ -92,8 +91,7 @@ void ShapeDialect::initialize() {
 #define GET_OP_LIST
 #include "mlir/Dialect/Shape/IR/ShapeOps.cpp.inc"
       >();
-  addTypes<ComponentType, ElementType, ShapeType, SizeType, ValueShapeType,
-           WitnessType>();
+  addTypes<ShapeType, SizeType, ValueShapeType, WitnessType>();
   addInterfaces<ShapeInlinerInterface>();
   // Allow unknown operations during prototyping and testing. As the dialect is
   // still evolving it makes it simple to start with an unregistered ops and
@@ -123,10 +121,6 @@ Type ShapeDialect::parseType(DialectAsmParser &parser) const {
   if (parser.parseKeyword(&keyword))
     return Type();
 
-  if (keyword == "component")
-    return ComponentType::get(getContext());
-  if (keyword == "element")
-    return ElementType::get(getContext());
   if (keyword == "shape")
     return ShapeType::get(getContext());
   if (keyword == "size")
@@ -143,8 +137,6 @@ Type ShapeDialect::parseType(DialectAsmParser &parser) const {
 /// Print a type registered to this dialect.
 void ShapeDialect::printType(Type type, DialectAsmPrinter &os) const {
   TypeSwitch<Type>(type)
-      .Case<ComponentType>([&](Type) { os << "component"; })
-      .Case<ElementType>([&](Type) { os << "element"; })
       .Case<ShapeType>([&](Type) { os << "shape"; })
       .Case<SizeType>([&](Type) { os << "size"; })
       .Case<ValueShapeType>([&](Type) { os << "value_shape"; })


        

