[Mlir-commits] [mlir] 974b4bf - [mlir][sparse] Add expand_symmetry attribute to the new operator.
llvmlistbot at llvm.org
Wed Nov 23 16:32:20 PST 2022
Author: bixia1
Date: 2022-11-23T16:32:15-08:00
New Revision: 974b4bf9fdf89448f8369a9b69f19e27d8a7949d
URL: https://github.com/llvm/llvm-project/commit/974b4bf9fdf89448f8369a9b69f19e27d8a7949d
DIFF: https://github.com/llvm/llvm-project/commit/974b4bf9fdf89448f8369a9b69f19e27d8a7949d.diff
LOG: [mlir][sparse] Add expand_symmetry attribute to the new operator.
The attribute tells the operation to expand symmetric structures when reading 2D tensors: a non-zero value found at (i, j) with i != j is also inserted at (j, i).
By default, the operation assumes the input tensor is not symmetric.
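For illustration, a minimal usage sketch of the extended syntax (the #CSR encoding and %src value below are illustrative placeholders, not part of this change):

  #CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>

  // Expand the symmetric input while materializing the 2D tensor:
  // a non-zero read at (i, j) with i != j is also inserted at (j, i).
  %a = sparse_tensor.new expand_symmetry %src : !llvm.ptr<i8> to tensor<?x?xf64, #CSR>

  // Without the attribute, the input is assumed to be non-symmetric.
  %b = sparse_tensor.new %src : !llvm.ptr<i8> to tensor<?x?xf64, #CSR>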
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D138230
Added:
Modified:
mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
mlir/test/Dialect/SparseTensor/invalid.mlir
mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
mlir/test/Dialect/SparseTensor/roundtrip.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 64facdc0a4113..183d1fdc45f69 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -26,7 +26,7 @@ class SparseTensor_Op<string mnemonic, list<Trait> traits = []>
//===----------------------------------------------------------------------===//
def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]>,
- Arguments<(ins AnyType:$source)>,
+ Arguments<(ins AnyType:$source, UnitAttr:$expandSymmetry)>,
Results<(outs AnySparseTensor:$result)> {
string summary = "Materializes a new sparse tensor from given source";
string description = [{
@@ -39,13 +39,22 @@ def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]>,
code. The operation is provided as an anchor that materializes a properly
typed sparse tensor with initial contents into a computation.
+ An optional attribute `expandSymmetry` can be used to extend this operation
+ to make symmetry in external formats explicit in the storage. That is, when
+ the attribute is present and a non-zero value is discovered at (i, j) with
+ i != j, the same value is also added at (j, i). This uses more storage than
+ purely symmetric storage and may therefore incur a performance hit. True
+ symmetric storage is planned for the future.
+
Example:
```mlir
sparse_tensor.new %source : !Source to tensor<1024x1024xf64, #CSR>
```
}];
- let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)";
+ let assemblyFormat = "(`expand_symmetry` $expandSymmetry^)? $source attr-dict"
+ "`:` type($source) `to` type($result)";
+ let hasVerifier = 1;
}
def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 7ee988bdcd16c..f4649b3876cc4 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -334,6 +334,13 @@ static LogicalResult isMatchingWidth(Value result, unsigned width) {
return failure();
}
+LogicalResult NewOp::verify() {
+ if (getExpandSymmetry() &&
+ getResult().getType().cast<RankedTensorType>().getRank() != 2)
+ return emitOpError("expand_symmetry can only be used for 2D tensors");
+ return success();
+}
+
LogicalResult ConvertOp::verify() {
if (auto tp1 = getSource().getType().dyn_cast<RankedTensorType>()) {
if (auto tp2 = getDest().getType().dyn_cast<RankedTensorType>()) {
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 7b9eba9937583..958aab6698e9c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -915,8 +915,8 @@ struct NewRewriter : public OpRewritePattern<NewOp> {
{indexTp}, {reader}, EmitCInterface::Off)
.getResult(0);
Value symmetric;
- // We assume only rank 2 tensors may have the isSymmetric flag set.
- if (rank == 2) {
+ // The verifier ensures only 2D tensors can have the expandSymmetry flag.
+ if (rank == 2 && op.getExpandSymmetry()) {
symmetric =
createFuncCall(rewriter, loc, "getSparseTensorReaderIsSymmetric",
{rewriter.getI1Type()}, {reader}, EmitCInterface::Off)
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index 02fb97bc866c6..9b412e1497694 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -180,6 +180,16 @@ func.func @sparse_wrong_arity_compression(%arg0: memref<?xf64>,
// -----
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+func.func @sparse_new(%arg0: !llvm.ptr<i8>) {
+ // expected-error@+1 {{expand_symmetry can only be used for 2D tensors}}
+ %0 = sparse_tensor.new expand_symmetry %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
+ return
+}
+
+// -----
+
func.func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> {
// expected-error@+1 {{unexpected type in convert}}
%0 = sparse_tensor.convert %arg0 : tensor<*xf32> to tensor<10xf32>
diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
index 31b35d75733c1..1547b0641cb13 100644
--- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
@@ -5,7 +5,7 @@
dimLevelType = ["dense", "compressed"]
}>
-// CHECK-LABEL: func.func @sparse_new(
+// CHECK-LABEL: func.func @sparse_new_symmetry(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> {
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -39,6 +39,37 @@
// CHECK: %[[R:.*]] = sparse_tensor.convert %[[T5]]
// CHECK: bufferization.dealloc_tensor %[[T5]]
// CHECK: return %[[R]]
+func.func @sparse_new_symmetry(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSR> {
+ %0 = sparse_tensor.new expand_symmetry %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #CSR>
+ return %0 : tensor<?x?xf32, #CSR>
+}
+
+// CHECK-LABEL: func.func @sparse_new(
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> {
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK: %[[R:.*]] = call @createSparseTensorReader(%[[A]])
+// CHECK: %[[DS:.*]] = memref.alloca(%[[C2]]) : memref<?xindex>
+// CHECK: call @getSparseTensorReaderDimSizes(%[[R]], %[[DS]])
+// CHECK: %[[D0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]]
+// CHECK: %[[D1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]]
+// CHECK: %[[T:.*]] = bufferization.alloc_tensor(%[[D0]], %[[D1]])
+// CHECK: %[[N:.*]] = call @getSparseTensorReaderNNZ(%[[R]])
+// CHECK: %[[VB:.*]] = memref.alloca()
+// CHECK: %[[T2:.*]] = scf.for %{{.*}} = %[[C0]] to %[[N]] step %[[C1]] iter_args(%[[A2:.*]] = %[[T]])
+// CHECK: func.call @getSparseTensorReaderNextF32(%[[R]], %[[DS]], %[[VB]])
+// CHECK: %[[E0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]]
+// CHECK: %[[E1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]]
+// CHECK: %[[V:.*]] = memref.load %[[VB]][]
+// CHECK: %[[T1:.*]] = sparse_tensor.insert %[[V]] into %[[A2]]{{\[}}%[[E0]], %[[E1]]]
+// CHECK: scf.yield %[[T1]]
+// CHECK: }
+// CHECK: call @delSparseTensorReader(%[[R]])
+// CHECK: %[[T4:.*]] = sparse_tensor.load %[[T2]] hasInserts
+// CHECK: %[[R:.*]] = sparse_tensor.convert %[[T4]]
+// CHECK: bufferization.dealloc_tensor %[[T4]]
+// CHECK: return %[[R]]
func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSR> {
%0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #CSR>
return %0 : tensor<?x?xf32, #CSR>
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index bc664ae3d2d00..ecaedae6827c2 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -13,6 +13,19 @@ func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
// -----
+#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
+
+// CHECK-LABEL: func @sparse_new_symmetry(
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK: %[[T:.*]] = sparse_tensor.new expand_symmetry %[[A]] : !llvm.ptr<i8> to tensor<?x?xf64, #{{.*}}>
+// CHECK: return %[[T]] : tensor<?x?xf64, #{{.*}}>
+func.func @sparse_new_symmetry(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf64, #SparseMatrix> {
+ %0 = sparse_tensor.new expand_symmetry %arg0 : !llvm.ptr<i8> to tensor<?x?xf64, #SparseMatrix>
+ return %0 : tensor<?x?xf64, #SparseMatrix>
+}
+
+// -----
+
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
// CHECK-LABEL: func @sparse_dealloc(
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
index 13a3ae25c0f0d..3addb4fc34eb4 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -63,7 +63,7 @@ module {
// Read the sparse matrix from file, construct sparse storage.
%fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
- %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #SparseMatrix>
+ %a = sparse_tensor.new expand_symmetry %fileName : !Filename to tensor<?x?xf64, #SparseMatrix>
// Call the kernel.
%0 = call @kernel_sum_reduce(%a, %x)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
index 6eae1e7f3dbc8..79efc1302a620 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
@@ -66,7 +66,7 @@ module {
// Read the sparse matrix from file, construct sparse storage.
%fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
- %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xcomplex<f64>, #SparseMatrix>
+ %a = sparse_tensor.new expand_symmetry %fileName : !Filename to tensor<?x?xcomplex<f64>, #SparseMatrix>
// Call the kernel.
%0 = call @kernel_sum_reduce(%a, %x)