[Mlir-commits] [mlir] 85defd2 - [mlir][shape] Use memref of index in shape lowering
Stephan Herhut
llvmlistbot at llvm.org
Thu Jul 30 06:20:14 PDT 2020
Author: Stephan Herhut
Date: 2020-07-30T15:12:43+02:00
New Revision: 85defd23aa09f2fa8bf48fed7a04ed4a5851cd0c
URL: https://github.com/llvm/llvm-project/commit/85defd23aa09f2fa8bf48fed7a04ed4a5851cd0c
DIFF: https://github.com/llvm/llvm-project/commit/85defd23aa09f2fa8bf48fed7a04ed4a5851cd0c.diff
LOG: [mlir][shape] Use memref of index in shape lowering
Now that memrefs of index type are supported, shapes no longer need to be materialized as i64 and then converted back with index_cast.
Differential Revision: https://reviews.llvm.org/D84938
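For illustration, this is a sketch of the IR the lowering now produces for an unranked shape.shape_of, reconstructed from the updated test further below (SSA value names are placeholders, not taken verbatim from the compiler output):

    %rank = rank %arg : tensor<*xf32>
    %mem = alloca(%rank) : memref<?xindex>
    %c0 = constant 0 : index
    %c1 = constant 1 : index
    scf.for %i = %c0 to %rank step %c1 {
      // The extent is stored directly; no index_cast to i64 is needed anymore.
      %dim = dim %arg, %i : tensor<*xf32>
      store %dim, %mem[%i] : memref<?xindex>
    }
    %shape = tensor_load %mem : memref<?xindex>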
Added:
Modified:
mlir/lib/Conversion/ShapeToSCF/ShapeToSCF.cpp
mlir/test/Conversion/ShapeToSCF/shape-to-scf.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Conversion/ShapeToSCF/ShapeToSCF.cpp b/mlir/lib/Conversion/ShapeToSCF/ShapeToSCF.cpp
index 0101c9e7fdc0..a6c667f5641c 100644
--- a/mlir/lib/Conversion/ShapeToSCF/ShapeToSCF.cpp
+++ b/mlir/lib/Conversion/ShapeToSCF/ShapeToSCF.cpp
@@ -186,8 +186,8 @@ ShapeOfOpConverter::matchAndRewrite(ShapeOfOp op, ArrayRef<Value> operands,
// Allocate stack memory.
auto loc = op.getLoc();
Value rank = rewriter.create<mlir::RankOp>(loc, arg);
- Type i64Ty = rewriter.getI64Type();
- Type memTy = MemRefType::get({ShapedType::kDynamicSize}, i64Ty);
+ Type indexTy = rewriter.getIndexType();
+ Type memTy = MemRefType::get({ShapedType::kDynamicSize}, indexTy);
Value mem = rewriter.create<AllocaOp>(loc, memTy, ValueRange{rank});
// Copy shape extents to stack-allocated memory.
@@ -197,15 +197,12 @@ ShapeOfOpConverter::matchAndRewrite(ShapeOfOp op, ArrayRef<Value> operands,
loc, zero, rank, one, llvm::None,
[&](OpBuilder &b, Location loc, Value iv, ValueRange args) {
Value dim = rewriter.create<DimOp>(loc, arg, iv);
- Value dimInt = rewriter.create<IndexCastOp>(loc, dim, i64Ty);
- rewriter.create<StoreOp>(loc, dimInt, mem, ValueRange{iv});
+ rewriter.create<StoreOp>(loc, dim, mem, ValueRange{iv});
rewriter.create<scf::YieldOp>(loc);
});
// Load extents to tensor value.
- Value extentTensorInt = rewriter.create<TensorLoadOp>(loc, mem);
- rewriter.replaceOpWithNewOp<IndexCastOp>(op.getOperation(), extentTensorInt,
- op.getType());
+ rewriter.replaceOpWithNewOp<TensorLoadOp>(op.getOperation(), mem);
return success();
}
diff --git a/mlir/test/Conversion/ShapeToSCF/shape-to-scf.mlir b/mlir/test/Conversion/ShapeToSCF/shape-to-scf.mlir
index 97d2bce5a094..768a627208b8 100644
--- a/mlir/test/Conversion/ShapeToSCF/shape-to-scf.mlir
+++ b/mlir/test/Conversion/ShapeToSCF/shape-to-scf.mlir
@@ -40,16 +40,14 @@ func @shape_of(%arg : tensor<*xf32>) {
// CHECK-SAME: (%[[ARG:.*]]: tensor<*xf32>)
func @shape_of_unranked(%arg : tensor<*xf32>) {
// CHECK: %[[RANK:.*]] = rank %[[ARG]] : tensor<*xf32>
- // CHECK: %[[SHAPE_MEM:.*]] = alloca(%[[RANK]]) : memref<?xi64>
+ // CHECK: %[[SHAPE_MEM:.*]] = alloca(%[[RANK]]) : memref<?xindex>
// CHECK: %[[C0:.*]] = constant 0 : index
// CHECK: %[[C1:.*]] = constant 1 : index
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[RANK]] step %[[C1]] {
// CHECK: %[[DIM:.*]] = dim %[[ARG]], %[[I]] : tensor<*xf32>
- // CHECK: %[[DIM_INT:.*]] = index_cast %[[DIM]] : index to i64
- // CHECK: store %[[DIM_INT]], %[[SHAPE_MEM]][%[[I]]] : memref<?xi64>
+ // CHECK: store %[[DIM]], %[[SHAPE_MEM]][%[[I]]] : memref<?xindex>
// CHECK: }
- // CHECK: %[[SHAPE_INT:.*]] = tensor_load %[[SHAPE_MEM]] : memref<?xi64>
- // CHECK: %[[SHAPE:.*]] = index_cast %[[SHAPE_INT]] : tensor<?xi64> to tensor<?xindex>
+ // CHECK: %[[SHAPE:.*]] = tensor_load %[[SHAPE_MEM]] : memref<?xindex>
%shape = shape.shape_of %arg : tensor<*xf32> -> tensor<?xindex>
return
}