[Mlir-commits] [mlir] 5389cdc - [mlir][sparse] Adding dynamic-size support for sparse=>dense conversion

wren romano llvmlistbot at llvm.org
Thu Oct 28 16:56:24 PDT 2021


Author: wren romano
Date: 2021-10-28T16:56:18-07:00
New Revision: 5389cdc8f67a50ec5678a37246e319dcb208c23a

URL: https://github.com/llvm/llvm-project/commit/5389cdc8f67a50ec5678a37246e319dcb208c23a
DIFF: https://github.com/llvm/llvm-project/commit/5389cdc8f67a50ec5678a37246e319dcb208c23a.diff

LOG: [mlir][sparse] Adding dynamic-size support for sparse=>dense conversion

Depends On D110790

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D112674
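
In short: allocDenseTensor() previously assumed a fully-static tensor type
(with a TODO for dynamic sizes); the lowering now also handles dynamic
dimensions by querying the runtime for each unknown size and threading the
results into the dense allocation. For illustration, a conversion such as
the following (taken from the new tests; the #SparseMatrix encoding is
assumed to match the test file's preamble, which lies outside this diff)
now lowers, with each '?' dimension sized via a generated sparseDimSize()
call that feeds the memref.alloc of the dense result:

  #SparseMatrix = #sparse_tensor.encoding<{
    dimLevelType = [ "compressed", "compressed" ]
  }>

  func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
    %0 = sparse_tensor.convert %arg0 : tensor<?x?xf64, #SparseMatrix> to tensor<?x?xf64>
    return %0 : tensor<?x?xf64>
  }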

Added: 
    

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 694853f7cd11..5d6f16524920 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -423,14 +423,20 @@ static Value genIndexAndValueForSparse(ConversionPatternRewriter &rewriter,
 }
 
 /// Generates code to allocate a tensor of the given type, and zero
-/// initialize it.  This function assumes the TensorType is fully
-/// specified (i.e., has static rank and sizes).
-// TODO(D112674): support dynamic sizes.
+/// initialize it.  If the tensor type has any dynamic sizes, then the
+/// `sizes` parameter should be as filled by sizesFromPtr(); that way
+/// we can reuse the genDimSizeCall() results generated by sizesFromPtr().
 static Value allocDenseTensor(ConversionPatternRewriter &rewriter, Location loc,
-                              RankedTensorType tensorTp) {
+                              RankedTensorType tensorTp, ValueRange sizes) {
   Type elemTp = tensorTp.getElementType();
-  auto memTp = MemRefType::get(tensorTp.getShape(), elemTp);
-  Value mem = rewriter.create<memref::AllocOp>(loc, memTp);
+  auto shape = tensorTp.getShape();
+  auto memTp = MemRefType::get(shape, elemTp);
+  SmallVector<Value> dynamicSizes;
+  for (unsigned i = 0, rank = tensorTp.getRank(); i < rank; i++) {
+    if (shape[i] == ShapedType::kDynamicSize)
+      dynamicSizes.push_back(sizes[i]);
+  }
+  Value mem = rewriter.create<memref::AllocOp>(loc, memTp, dynamicSizes);
   Value zero = constantZero(rewriter, loc, elemTp);
   rewriter.create<linalg::FillOp>(loc, zero, mem).result();
   return mem;
@@ -595,9 +601,6 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
       if (!tensorTp)
         return failure();
       unsigned rank = tensorTp.getRank();
-      Value dst = allocDenseTensor(rewriter, loc, tensorTp);
-      Value ind = genAlloca(rewriter, loc, rank, rewriter.getIndexType());
-      Value elemPtr = genAllocaScalar(rewriter, loc, tensorTp.getElementType());
       encDst = SparseTensorEncodingAttr::get(
           op->getContext(),
           SmallVector<SparseTensorEncodingAttr::DimLevelType>(
@@ -605,10 +608,12 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
           AffineMap(), encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
       SmallVector<Value, 4> sizes;
       SmallVector<Value, 8> params;
-      // TODO(D112674): support dynamic sizes.
-      sizesFromType(rewriter, sizes, loc, tensorTp);
+      sizesFromPtr(rewriter, sizes, op, encSrc, tensorTp, src);
       newParams(rewriter, params, op, encDst, kToIter, sizes, src);
       Value iter = genNewCall(rewriter, op, params);
+      Value ind = genAlloca(rewriter, loc, rank, rewriter.getIndexType());
+      Value elemPtr = genAllocaScalar(rewriter, loc, tensorTp.getElementType());
+      Value dst = allocDenseTensor(rewriter, loc, tensorTp, sizes);
       SmallVector<Value> noArgs;
       SmallVector<Type> noTypes;
       auto whileOp = rewriter.create<scf::WhileOp>(loc, noTypes, noArgs);

diff --git a/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir b/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
index 3c3d0602c65f..377e13794e5a 100644
--- a/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
@@ -17,31 +17,26 @@
 //  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<13xi32>
 //   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[I13:.*]] = arith.constant 13 : index
-//
-//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<13xi32>
-//   CHECK-DAG: %[[E0:.*]] = arith.constant 0 : i32
-//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : i32, memref<13xi32>
-//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
-//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
-//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
-//
 //   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<1xi8>
 //   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<1xi8> to memref<?xi8>
 //   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
 //   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<1xi8>
-//
 //   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: memref.store %[[I13]], %[[SizesS]][%[[I0]]] : memref<1xindex>
-//
 //   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
-//
 //   CHECK-DAG: %[[SecTp:.*]] = arith.constant 1 : i32
 //   CHECK-DAG: %[[ElemTp:.*]] = arith.constant 4 : i32
 //   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
-//       CHECK: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[SecTp]], %[[SecTp]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[SecTp]], %[[SecTp]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
+//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<13xi32>
+//   CHECK-DAG: %[[E0:.*]] = arith.constant 0 : i32
+//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : i32, memref<13xi32>
 //       CHECK: scf.while : () -> () {
 //       CHECK:   %[[Cond:.*]] = call @getNextI32(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<i32>) -> i1
 //       CHECK:   scf.condition(%[[Cond]])
@@ -58,38 +53,73 @@ func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32>
   return %0 : tensor<13xi32>
 }
 
+// CHECK-LABEL: func @sparse_convert_1d_dyn(
+//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?xi32>
+//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
+//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<1xi8>
+//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<1xi8> to memref<?xi8>
+//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<1xi8>
+//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
+//   CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<1xindex>
+//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
+//   CHECK-DAG: %[[SecTp:.*]] = arith.constant 1 : i32
+//   CHECK-DAG: %[[ElemTp:.*]] = arith.constant 4 : i32
+//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[SecTp]], %[[SecTp]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
+//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]]) : memref<?xi32>
+//   CHECK-DAG: %[[E0:.*]] = arith.constant 0 : i32
+//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : i32, memref<?xi32>
+//       CHECK: scf.while : () -> () {
+//       CHECK:   %[[Cond:.*]] = call @getNextI32(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<i32>) -> i1
+//       CHECK:   scf.condition(%[[Cond]])
+//       CHECK: } do {
+//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<1xindex>
+//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<i32>
+//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]]] : memref<?xi32>
+//       CHECK:   scf.yield
+//       CHECK: }
+//       CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<?xi32>
+//       CHECK: return %[[T]] : tensor<?xi32>
+func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
+  %0 = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
+  return %0 : tensor<?xi32>
+}
+
 // CHECK-LABEL: func @sparse_convert_2d(
 //  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x4xf64>
 //   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
 //   CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
-//
-//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x4xf64>
-//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
-//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x4xf64>
-//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
-//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
-//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
-//
 //   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
 //   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
 //   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
 //   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
 //   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
-//
 //   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<2xindex>
 //   CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I1]]] : memref<2xindex>
-//
 //   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
 //   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
-//
 //   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
-//       CHECK: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
+//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x4xf64>
+//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
+//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x4xf64>
 //       CHECK: scf.while : () -> () {
 //       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
 //       CHECK:   scf.condition(%[[Cond]])
@@ -107,6 +137,138 @@ func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64
   return %0 : tensor<2x4xf64>
 }
 
+// CHECK-LABEL: func @sparse_convert_2d_dyn0(
+//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?x4xf64>
+//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
+//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
+//   CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
+//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
+//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
+//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
+//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
+//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
+//   CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I1]]] : memref<2xindex>
+//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
+//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
+//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]]) : memref<?x4xf64>
+//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
+//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<?x4xf64>
+//       CHECK: scf.while : () -> () {
+//       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
+//       CHECK:   scf.condition(%[[Cond]])
+//       CHECK: } do {
+//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
+//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
+//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
+//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x4xf64>
+//       CHECK:   scf.yield
+//       CHECK: }
+//       CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<?x4xf64>
+//       CHECK: return %[[T]] : tensor<?x4xf64>
+func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
+  %0 = sparse_tensor.convert %arg0 : tensor<?x4xf64, #SparseMatrix> to tensor<?x4xf64>
+  return %0 : tensor<?x4xf64>
+}
+
+// CHECK-LABEL: func @sparse_convert_2d_dyn1(
+//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x?xf64>
+//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
+//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
+//   CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
+//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
+//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
+//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
+//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
+//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[SizeI1:.*]] = call @sparseDimSize(%[[Arg]], %[[I1]]) : (!llvm.ptr<i8>, index) -> index
+//   CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[SizeI1]], %[[SizesS]][%[[I1]]] : memref<2xindex>
+//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
+//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
+//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI1]]) : memref<2x?xf64>
+//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
+//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x?xf64>
+//       CHECK: scf.while : () -> () {
+//       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
+//       CHECK:   scf.condition(%[[Cond]])
+//       CHECK: } do {
+//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
+//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
+//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
+//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<2x?xf64>
+//       CHECK:   scf.yield
+//       CHECK: }
+//       CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<2x?xf64>
+//       CHECK: return %[[T]] : tensor<2x?xf64>
+func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
+  %0 = sparse_tensor.convert %arg0 : tensor<2x?xf64, #SparseMatrix> to tensor<2x?xf64>
+  return %0 : tensor<2x?xf64>
+}
+
+// CHECK-LABEL: func @sparse_convert_2d_dyn2(
+//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf64>
+//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
+//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
+//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
+//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
+//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
+//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
+//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
+//   CHECK-DAG: %[[SizeI1:.*]] = call @sparseDimSize(%[[Arg]], %[[I1]]) : (!llvm.ptr<i8>, index) -> index
+//   CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[SizeI1]], %[[SizesS]][%[[I1]]] : memref<2xindex>
+//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
+//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
+//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]], %[[SizeI1]]) : memref<?x?xf64>
+//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
+//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<?x?xf64>
+//       CHECK: scf.while : () -> () {
+//       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
+//       CHECK:   scf.condition(%[[Cond]])
+//       CHECK: } do {
+//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
+//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
+//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
+//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x?xf64>
+//       CHECK:   scf.yield
+//       CHECK: }
+//       CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<?x?xf64>
+//       CHECK: return %[[T]] : tensor<?x?xf64>
+func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
+  %0 = sparse_tensor.convert %arg0 : tensor<?x?xf64, #SparseMatrix> to tensor<?x?xf64>
+  return %0 : tensor<?x?xf64>
+}
+
 // CHECK-LABEL: func @sparse_convert_3d(
 //  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x3x4xf64>
 //   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
@@ -114,35 +276,30 @@ func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64
 //   CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
 //   CHECK-DAG: %[[I3:.*]] = arith.constant 3 : index
 //   CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
-//
-//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x3x4xf64>
-//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
-//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x3x4xf64>
-//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<3xindex>
-//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<3xindex> to memref<?xindex>
-//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
-//
 //   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<3xi8>
 //   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<3xi8> to memref<?xi8>
 //   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
 //   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<3xi8>
 //   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<3xi8>
 //   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I2]]] : memref<3xi8>
-//
 //   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<3xindex>
 //   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<3xindex> to memref<?xindex>
 //   CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<3xindex>
 //   CHECK-DAG: memref.store %[[I3]], %[[SizesS]][%[[I1]]] : memref<3xindex>
 //   CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I2]]] : memref<3xindex>
-//
 //   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<3xindex>
 //   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<3xindex> to memref<?xindex>
 //   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<3xindex>
 //   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<3xindex>
 //   CHECK-DAG: memref.store %[[I2]], %[[PermS]][%[[I2]]] : memref<3xindex>
-//
 //   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
-//       CHECK: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<3xindex>
+//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<3xindex> to memref<?xindex>
+//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
+//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x3x4xf64>
+//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
+//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x3x4xf64>
 //       CHECK: scf.while : () -> () {
 //       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
 //       CHECK:   scf.condition(%[[Cond]])
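
For reference, the essential call pattern inside the rewritten conversion
(an illustrative excerpt of the patched code above, not a standalone unit):

    // Gather all dimension sizes up front; for dynamic dimensions this
    // emits the sparseDimSize() calls checked in the tests.
    SmallVector<Value, 4> sizes;
    sizesFromPtr(rewriter, sizes, op, encSrc, tensorTp, src);
    ...
    // allocDenseTensor() selects only the dynamic entries of `sizes` and
    // forwards them to memref.alloc; static extents come from the type.
    Value dst = allocDenseTensor(rewriter, loc, tensorTp, sizes);

Note that the destination buffer (along with the index and element buffers)
is now allocated after genNewCall(), since the dynamic sizes must be
materialized by sizesFromPtr() before allocDenseTensor() can consume them.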