[Mlir-commits] [mlir] 0128f80 - [mlir][sparse] Fix the codegen for the convert operator to handle hidden nop convert.
llvmlistbot at llvm.org
Thu Oct 20 12:30:26 PDT 2022
Author: bixia1
Date: 2022-10-20T12:30:21-07:00
New Revision: 0128f8016770655fe7a40d3657f00853e6badb93
URL: https://github.com/llvm/llvm-project/commit/0128f8016770655fe7a40d3657f00853e6badb93
DIFF: https://github.com/llvm/llvm-project/commit/0128f8016770655fe7a40d3657f00853e6badb93.diff
LOG: [mlir][sparse] Fix the codegen for the convert operator to handle hidden nop convert.
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D136291
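For context, a "hidden" nop convert is one whose source and destination share the same sparse encoding but spell the tensor type differently (for example, a static versus a dynamic dimension), so comparing the full types misses the fact that the conversion carries no work. An illustrative sketch (the function name is made up; it mirrors the updated codegen.mlir test further below):

```mlir
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>

// Same #SparseVector encoding on both sides, but different static shapes:
// beyond the type change, the convert is a nop for codegen.
func.func @hidden_nop_convert(%arg0: tensor<32xf32, #SparseVector>)
    -> tensor<?xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0
      : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}
```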
Added:
Modified:
mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
mlir/test/Dialect/SparseTensor/codegen.mlir
mlir/test/Dialect/SparseTensor/fold.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index df02c5e801da9..822fd44b306d9 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -71,6 +71,12 @@ def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
data structure transformations. Also, conversions from sparse tensor types
into dense tensor types may be infeasible in terms of storage requirements.
+ Trivial dense-to-dense convert will be removed by canonicalization while
+ trivial sparse-to-sparse convert will be removed by the sparse codegen. This
+ is because we use trivial sparse-to-sparse convert to tell bufferization
+ that the sparse codegen will expand the tensor buffer into sparse tensor
+ storage.
+
Examples:
```mlir
@@ -86,6 +92,7 @@ def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
}];
let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
+ let hasFolder = 1;
let hasVerifier = 1;
}
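To make the division of labor described above concrete, here is an illustrative pair of trivial converts (hypothetical function names, reusing the #SparseVector encoding from the tests): the first is folded away by canonicalization via the new fold hook, while the second is deliberately kept so bufferization still sees it, and is only removed later by the sparse codegen.

```mlir
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>

// Trivial dense-to-dense: folded to %arg0 by canonicalization (ConvertOp::fold).
func.func @trivial_dense2dense(%arg0: tensor<64xf32>) -> tensor<64xf32> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32>
  return %0 : tensor<64xf32>
}

// Trivial sparse-to-sparse: not folded; it tells bufferization that the sparse
// codegen will expand the tensor buffer into sparse tensor storage, and it is
// removed by the sparse codegen instead.
func.func @trivial_sparse2sparse(%arg0: tensor<64xf32, #SparseVector>)
    -> tensor<64xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0
      : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
  return %0 : tensor<64xf32, #SparseVector>
}
```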
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 95a7a47827e40..14313caf87cc8 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -333,6 +333,17 @@ LogicalResult ConvertOp::verify() {
return emitError("unexpected type in convert");
}
+OpFoldResult ConvertOp::fold(ArrayRef<Attribute> operands) {
+ Type dstType = getType();
+ // Fold trivial dense-to-dense convert and leave trivial sparse-to-sparse
+ // convert for codegen to remove. This is because we use trivial
+ // sparse-to-sparse convert to tell bufferization that the sparse codegen
+ // will expand the tensor buffer into sparse tensor storage.
+ if (!getSparseTensorEncoding(dstType) && dstType == getSource().getType())
+ return getSource();
+ return {};
+}
+
LogicalResult ToPointersOp::verify() {
auto e = getSparseTensorEncoding(getTensor().getType());
if (failed(isInBounds(getDimension().getZExtValue(), getTensor())))
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index e8d0e768852ad..1beb1271103b4 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -727,7 +727,10 @@ class SparseConvertConverter : public OpConversionPattern<ConvertOp> {
LogicalResult
matchAndRewrite(ConvertOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- if (op.getType() != op.getSource().getType()) {
+ SparseTensorEncodingAttr encDst = getSparseTensorEncoding(op.getType());
+ SparseTensorEncodingAttr encSrc =
+ getSparseTensorEncoding(op.getSource().getType());
+ if (encDst != encSrc) {
// This should be handled by rewriting before codegen.
return failure();
}
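With the pattern now comparing encodings rather than full tensor types, encoding-preserving converts are treated as nops, while a convert between differently encoded sparse types still bails out of this pattern. A hypothetical sketch of a convert the pattern continues to reject (the encoding and function names are made up for illustration):

```mlir
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
#DenseVector  = #sparse_tensor.encoding<{dimLevelType = ["dense"]}>

// The source and destination encodings differ, so SparseConvertConverter
// returns failure(): a real sparse-to-sparse conversion is expected to be
// handled by rewriting before codegen runs.
func.func @real_convert(%arg0: tensor<64xf32, #SparseVector>)
    -> tensor<64xf32, #DenseVector> {
  %0 = sparse_tensor.convert %arg0
      : tensor<64xf32, #SparseVector> to tensor<64xf32, #DenseVector>
  return %0 : tensor<64xf32, #DenseVector>
}
```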
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index bb2eb88d2e1d4..6b5c6c4ce3808 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -526,7 +526,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
// CHECK-SAME: %[[A3:.*]]: memref<?xi64>,
// CHECK-SAME: %[[A4:.*]]: memref<?xf32>)
// CHECK: return %[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]] : memref<1xindex>, memref<3xindex>, memref<?xi32>, memref<?xi64>, memref<?xf32>
-func.func @sparse_nop_convert(%arg0: tensor<?xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
- %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector> to tensor<?xf32, #SparseVector>
+func.func @sparse_nop_convert(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
+ %0 = sparse_tensor.convert %arg0 : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
return %0 : tensor<?xf32, #SparseVector>
}
diff --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir
index fba2e8e9ceecb..6b1ebb173e24b 100644
--- a/mlir/test/Dialect/SparseTensor/fold.mlir
+++ b/mlir/test/Dialect/SparseTensor/fold.mlir
@@ -2,6 +2,15 @@
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+// CHECK-LABEL: func @sparse_nop_dense2dense_convert(
+// CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
+// CHECK-NOT: sparse_tensor.convert
+// CHECK: return %[[A]] : tensor<64xf32>
+func.func @sparse_nop_dense2dense_convert(%arg0: tensor<64xf32>) -> tensor<64xf32> {
+ %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32>
+ return %0 : tensor<64xf32>
+}
+
// CHECK-LABEL: func @sparse_dce_convert(
// CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
// CHECK-NOT: sparse_tensor.convert