[Mlir-commits] [mlir] f3fd739 - [mlir][sparse] Improve the rewriting for NewOp with dimension ordering.
llvmlistbot at llvm.org
Mon Jan 9 14:40:38 PST 2023
Author: bixia1
Date: 2023-01-09T14:40:33-08:00
New Revision: f3fd739d39abd3bee289e00a7ae9a4602d995cbb
URL: https://github.com/llvm/llvm-project/commit/f3fd739d39abd3bee289e00a7ae9a4602d995cbb
DIFF: https://github.com/llvm/llvm-project/commit/f3fd739d39abd3bee289e00a7ae9a4602d995cbb.diff
LOG: [mlir][sparse] Improve the rewriting for NewOp with dimension ordering.
Previously, we used a temporary tensor with the identity dimension ordering. We now
use a temporary tensor with the destination dimension ordering, to enable the use of
sort_coo for sorting the tensor.
Reviewed By: Peiming
Differential Revision: https://reviews.llvm.org/D141295
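For context, here is a minimal standalone C++ sketch of the index permutation this
patch performs in the rewritten loop. It is not the MLIR API: toStoredDimSketch and
dimToLvl are hypothetical stand-ins for the real toStoredDim helper and the
encoding's dimension ordering.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper: for dimension `d`, return the storage level it maps to.
// For CSC, dimOrdering = affine_map<(i, j) -> (j, i)>, so dim 0 -> level 1 and
// dim 1 -> level 0.
static uint64_t toStoredDimSketch(const std::vector<uint64_t> &dimToLvl,
                                  uint64_t d) {
  return dimToLvl[d];
}

int main() {
  std::vector<uint64_t> dimToLvl = {1, 0};   // CSC-style dimension ordering.
  std::vector<uint64_t> dimIndices = {3, 7}; // (row, col) as read from the file.

  // Scatter each dimension index into its storage position, mirroring
  // indicesArray[toStoredDim(encDst, i)] = ... in the patch.
  std::vector<uint64_t> lvlIndices(dimIndices.size());
  for (uint64_t d = 0; d < dimIndices.size(); ++d)
    lvlIndices[toStoredDimSketch(dimToLvl, d)] = dimIndices[d];

  // Prints "7 3": the insert now receives indices in the destination ordering,
  // so sort_coo can sort the temporary COO tensor directly.
  std::cout << lvlIndices[0] << " " << lvlIndices[1] << "\n";
  return 0;
}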
Added:
Modified:
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index f923a1d27c377..324c5b32f5053 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -1020,7 +1020,8 @@ struct NewRewriter : public OpRewritePattern<NewOp> {
// get the next element from the input file
// insert the element to %tmp
// %t = sparse_tensor.ConvertOp %tmp
- RankedTensorType cooTp = getUnorderedCOOFromType(dstTp);
+ RankedTensorType cooTp =
+ getUnorderedCOOFromTypeWithOrdering(dstTp, encDst.getDimOrdering());
auto cooBuffer =
rewriter.create<AllocTensorOp>(loc, cooTp, dynSizesArray).getResult();
@@ -1050,10 +1051,10 @@ struct NewRewriter : public OpRewritePattern<NewOp> {
Value indices = dimSizes; // Reuse the indices memref to store indices.
createFuncCall(rewriter, loc, getNextFuncName, {}, {reader, indices, value},
EmitCInterface::On);
- SmallVector<Value> indicesArray;
+ SmallVector<Value> indicesArray(rank, Value());
for (uint64_t i = 0; i < rank; i++) {
- indicesArray.push_back(rewriter.create<memref::LoadOp>(
- loc, indices, constantIndex(rewriter, loc, i)));
+ indicesArray[toStoredDim(encDst, i)] = rewriter.create<memref::LoadOp>(
+ loc, indices, constantIndex(rewriter, loc, i));
}
Value v = rewriter.create<memref::LoadOp>(loc, value);
Value t = rewriter.create<InsertOp>(loc, v, forOp.getRegionIterArg(0),
diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
index ceb9c085dca52..00811d4a0a892 100644
--- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
@@ -5,6 +5,11 @@
dimLevelType = ["dense", "compressed"]
}>
+#CSC = #sparse_tensor.encoding<{
+ dimLevelType = [ "dense", "compressed" ],
+ dimOrdering = affine_map<(i, j) -> (j, i)>
+}>
+
// CHECK-LABEL: func.func @sparse_new_symmetry(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> {
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -75,6 +80,37 @@ func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSR> {
return %0 : tensor<?x?xf32, #CSR>
}
+// CHECK-LABEL: func.func @sparse_new_csc(
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> {
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK: %[[R:.*]] = call @createSparseTensorReader(%[[A]])
+// CHECK: %[[DS:.*]] = memref.alloca(%[[C2]]) : memref<?xindex>
+// CHECK: call @copySparseTensorReaderDimSizes(%[[R]], %[[DS]])
+// CHECK: %[[D0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]]
+// CHECK: %[[D1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]]
+// CHECK: %[[T:.*]] = bufferization.alloc_tensor(%[[D0]], %[[D1]])
+// CHECK: %[[N:.*]] = call @getSparseTensorReaderNNZ(%[[R]])
+// CHECK: %[[VB:.*]] = memref.alloca()
+// CHECK: %[[T2:.*]] = scf.for %{{.*}} = %[[C0]] to %[[N]] step %[[C1]] iter_args(%[[A2:.*]] = %[[T]])
+// CHECK: func.call @getSparseTensorReaderNextF32(%[[R]], %[[DS]], %[[VB]])
+// CHECK: %[[E0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]]
+// CHECK: %[[E1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]]
+// CHECK: %[[V:.*]] = memref.load %[[VB]][]
+// CHECK: %[[T1:.*]] = sparse_tensor.insert %[[V]] into %[[A2]]{{\[}}%[[E1]], %[[E0]]]
+// CHECK: scf.yield %[[T1]]
+// CHECK: }
+// CHECK: call @delSparseTensorReader(%[[R]])
+// CHECK: %[[T4:.*]] = sparse_tensor.load %[[T2]] hasInserts
+// CHECK: %[[R:.*]] = sparse_tensor.convert %[[T4]]
+// CHECK: bufferization.dealloc_tensor %[[T4]]
+// CHECK: return %[[R]]
+func.func @sparse_new_csc(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSC> {
+ %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #CSC>
+ return %0 : tensor<?x?xf32, #CSC>
+}
+
// CHECK-LABEL: func.func @sparse_out(
// CHECK-SAME: %[[A:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>,
// CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>) {