[Mlir-commits] [mlir] 5106a8b - [MLIR][Shape] Lower `shape_of` to `dynamic_tensor_from_elements`

Frederik Gossen llvmlistbot at llvm.org
Wed Sep 9 00:55:35 PDT 2020


Author: Frederik Gossen
Date: 2020-09-09T07:55:13Z
New Revision: 5106a8b8f8d0d3dd6c3fc0554f05402d8d9177ef

URL: https://github.com/llvm/llvm-project/commit/5106a8b8f8d0d3dd6c3fc0554f05402d8d9177ef
DIFF: https://github.com/llvm/llvm-project/commit/5106a8b8f8d0d3dd6c3fc0554f05402d8d9177ef.diff

LOG: [MLIR][Shape] Lower `shape_of` to `dynamic_tensor_from_elements`

Take advantage of the new `dynamic_tensor_from_elements` operation in `std`.
Instead of lowering through stack-allocated memory and a loop, we can now lower
directly to a single `std` operation.
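
For illustration, the unranked case now lowers roughly as follows (a sketch
mirroring the updated test below; SSA value names are illustrative):

  %rank = rank %arg : tensor<*xf32>
  %shape = dynamic_tensor_from_elements %rank {
  ^bb0(%i: index):
    %extent = dim %arg, %i : tensor<*xf32>
    yield %extent : index
  } : tensor<?xindex>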

Differential Revision: https://reviews.llvm.org/D86935

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
    mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
    mlir/lib/Dialect/StandardOps/IR/Ops.cpp
    mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
index c276818589af..44bbb423b2d9 100644
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
@@ -1504,6 +1504,13 @@ def DynamicTensorFromElementsOp : Std_Op<"dynamic_tensor_from_elements",
   let arguments = (ins Variadic<Index>:$dynamicExtents);
   let results = (outs AnyRankedTensor:$result);
   let regions = (region SizedRegion<1>:$body);
+
+  let builders = [
+    // Build op and populate its body per callback function.
+    OpBuilder<"OpBuilder &b, OperationState &result, Type resultTy, "
+              "ValueRange dynamicExtents, "
+              "function_ref<void(OpBuilder &, Location, ValueRange)>">,
+  ];
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
index 8c917e08f942..f3f11e89af02 100644
--- a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
+++ b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
@@ -422,6 +422,7 @@ LogicalResult ShapeOfOpConversion::matchAndRewrite(
     return failure();
 
   // For ranked tensor arguments, lower to `tensor_from_elements`.
+  auto loc = op.getLoc();
   ShapeOfOp::Adaptor transformed(operands);
   Value tensor = transformed.arg();
   Type tensorTy = tensor.getType();
@@ -431,7 +432,6 @@ LogicalResult ShapeOfOpConversion::matchAndRewrite(
     SmallVector<Value, 8> extentValues;
     RankedTensorType rankedTensorTy = tensorTy.cast<RankedTensorType>();
     int64_t rank = rankedTensorTy.getRank();
-    auto loc = op.getLoc();
     for (int64_t i = 0; i < rank; i++) {
       if (rankedTensorTy.isDynamicDim(i)) {
         Value extent = rewriter.create<DimOp>(loc, tensor, i);
@@ -451,26 +451,17 @@ LogicalResult ShapeOfOpConversion::matchAndRewrite(
     return success();
   }
 
-  // Allocate stack memory.
-  auto loc = op.getLoc();
+  // Lower to `dynamic_tensor_from_elements` otherwise.
+  auto *ctx = rewriter.getContext();
   Value rank = rewriter.create<mlir::RankOp>(loc, tensor);
-  Type indexTy = rewriter.getIndexType();
-  Type memTy = MemRefType::get({ShapedType::kDynamicSize}, indexTy);
-  Value mem = rewriter.create<AllocaOp>(loc, memTy, ValueRange{rank});
-
-  // Copy shape extents to stack-allocated memory.
-  Value zero = rewriter.create<ConstantIndexOp>(loc, 0);
-  Value one = rewriter.create<ConstantIndexOp>(loc, 1);
-  rewriter.create<scf::ForOp>(
-      loc, zero, rank, one, llvm::None,
-      [&](OpBuilder &b, Location loc, Value iv, ValueRange args) {
-        Value dim = rewriter.create<DimOp>(loc, tensor, iv);
-        rewriter.create<StoreOp>(loc, dim, mem, ValueRange{iv});
-        rewriter.create<scf::YieldOp>(loc);
+  rewriter.replaceOpWithNewOp<DynamicTensorFromElementsOp>(
+      op, getExtentTensorType(ctx), ValueRange{rank},
+      [&](OpBuilder &b, Location loc, ValueRange args) {
+        Value dim = args.front();
+        Value extent = b.create<DimOp>(loc, tensor, dim);
+        b.create<mlir::YieldOp>(loc, extent);
       });
 
-  // Load extents to tensor value.
-  rewriter.replaceOpWithNewOp<TensorLoadOp>(op.getOperation(), mem);
   return success();
 }
 

diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 1c6901987019..a0ad05852e23 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -1694,6 +1694,22 @@ static LogicalResult verify(DynamicTensorFromElementsOp op) {
   return success();
 }
 
+void DynamicTensorFromElementsOp::build(
+    OpBuilder &b, OperationState &result, Type resultTy,
+    ValueRange dynamicExtents,
+    function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
+  build(b, result, resultTy, dynamicExtents);
+
+  // Build and populate body.
+  OpBuilder::InsertionGuard guard(b);
+  Region *bodyRegion = result.regions.front().get();
+  auto rank = resultTy.cast<RankedTensorType>().getRank();
+  SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
+  Block *bodyBlock =
+      b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes);
+  bodyBuilder(b, result.location, bodyBlock->getArguments());
+}
+
 //===----------------------------------------------------------------------===//
 // ExtractElementOp
 //===----------------------------------------------------------------------===//

diff --git a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
index 4d2437a4877b..4168634f1240 100644
--- a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
+++ b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
@@ -191,14 +191,11 @@ func @shape_of(%arg : tensor<*xf32>) {
 // CHECK-SAME: (%[[ARG:.*]]: tensor<*xf32>)
 func @shape_of_unranked(%arg : tensor<*xf32>) {
   // CHECK: %[[RANK:.*]] = rank %[[ARG]] : tensor<*xf32>
-  // CHECK: %[[SHAPE_MEM:.*]] = alloca(%[[RANK]]) : memref<?xindex>
-  // CHECK: %[[C0:.*]] = constant 0 : index
-  // CHECK: %[[C1:.*]] = constant 1 : index
-  // CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[RANK]] step %[[C1]] {
-  // CHECK:   %[[DIM:.]] = dim %[[ARG]], %[[I]] : tensor<*xf32>
-  // CHECK:   store %[[DIM]], %[[SHAPE_MEM]][%[[I]]] : memref<?xindex>
-  // CHECK: }
-  // CHECK: %[[SHAPE:.*]] = tensor_load %[[SHAPE_MEM]] : memref<?xindex>
+  // CHECK: %[[SHAPE:.*]] = dynamic_tensor_from_elements %[[RANK]] {
+  // CHECK: ^bb0(%[[I:.*]]: index):
+  // CHECK:   %[[EXTENT:.*]] = dim %[[ARG]], %[[I]] : tensor<*xf32>
+  // CHECK:   yield %[[EXTENT]] : index
+  // CHECK: } : tensor<?xindex>
   %shape = shape.shape_of %arg : tensor<*xf32> -> tensor<?xindex>
   return
 }

More information about the Mlir-commits mailing list