[Mlir-commits] [mlir] 6607fdf - [mlir][sparse] add memSizes array to sparse storage format

Aart Bik llvmlistbot at llvm.org
Mon Sep 12 14:04:12 PDT 2022


Author: Aart Bik
Date: 2022-09-12T14:04:01-07:00
New Revision: 6607fdf7490c5bf73a8892bc08ed2dba55043ca8

URL: https://github.com/llvm/llvm-project/commit/6607fdf7490c5bf73a8892bc08ed2dba55043ca8
DIFF: https://github.com/llvm/llvm-project/commit/6607fdf7490c5bf73a8892bc08ed2dba55043ca8.diff

LOG: [mlir][sparse] add memSizes array to sparse storage format

Rationale:
For every dynamic memref (memref<?xtype>), the stored size really
indicates the capacity, while the corresponding entry in the memSizes
array indicates the size actually in use. This allows us to use
memrefs as "vectors".
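As a concrete illustration (derived from the updated codegen test below,
for a ?x? f64 tensor with the #CSR encoding used there, i.e. i32 pointers
and i64 indices; not itself part of the commit message), the lowered
storage now carries one extra memref<3xindex> whose entries record how
much of each dynamic buffer is in use:

  struct {
    memref<2xindex> dimSizes   ; size in each dimension
    memref<3xindex> memSizes   ; in-use sizes of pointers/indices/values
    memref<?xi32>   pointers   ; pointers of the compressed dimension
    memref<?xi64>   indices    ; indices of the compressed dimension
    memref<?xf64>   values     ; primary values
  }

The allocated extents of the dynamic memrefs then only bound the
capacity, in the spirit of a std::vector.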

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D133724

Added: 
    

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
    mlir/test/Dialect/SparseTensor/codegen.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 4ac2d1775a811..d5c6d8a276728 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -101,6 +101,42 @@ static Optional<Value> sizeFromTensorAtDim(OpBuilder &rewriter, Location loc,
       .getResult();
 }
 
+/// Returns field index of sparse tensor type for pointers/indices, when set.
+static unsigned getFieldIndex(Type type, unsigned ptrDim, unsigned idxDim) {
+  auto enc = getSparseTensorEncoding(type);
+  assert(enc);
+  RankedTensorType rType = type.cast<RankedTensorType>();
+  unsigned field = 2; // start past sizes
+  unsigned ptr = 0;
+  unsigned idx = 0;
+  for (unsigned r = 0, rank = rType.getShape().size(); r < rank; r++) {
+    switch (enc.getDimLevelType()[r]) {
+    case SparseTensorEncodingAttr::DimLevelType::Dense:
+      break; // no fields
+    case SparseTensorEncodingAttr::DimLevelType::Compressed:
+    case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
+    case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
+    case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
+      if (ptr++ == ptrDim)
+        return field;
+      field++;
+      if (idx++ == idxDim)
+        return field;
+      field++;
+      break;
+    case SparseTensorEncodingAttr::DimLevelType::Singleton:
+    case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
+    case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
+    case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
+      if (idx++ == idxDim)
+        return field;
+      field++;
+      break;
+    }
+  }
+  return field + 1; // return values field index
+}
+
 /// Maps a sparse tensor type to the appropriate compounded buffers.
 static Optional<LogicalResult>
 convertSparseTensorType(Type type, SmallVectorImpl<Type> &fields) {
@@ -118,10 +154,13 @@ convertSparseTensorType(Type type, SmallVectorImpl<Type> &fields) {
   Type eltType = rType.getElementType();
   //
   // Sparse tensor storage for rank-dimensional tensor is organized as a
-  // single compound type with the following fields:
+  // single compound type with the following fields. Note that every
+  // memref with ? size actually behaves as a "vector", i.e. the stored
+  // size is the capacity and the used size resides in the memSizes array.
   //
   // struct {
   //   memref<rank x index> dimSizes     ; size in each dimension
+  //   memref<n x index> memSizes        ; sizes of ptrs/inds/values
   //   ; per-dimension d:
   //   ;  if dense:
   //        <nothing>
@@ -136,6 +175,9 @@ convertSparseTensorType(Type type, SmallVectorImpl<Type> &fields) {
   unsigned rank = rType.getShape().size();
   // The dimSizes array.
   fields.push_back(MemRefType::get({rank}, indexType));
+  // The memSizes array.
+  unsigned lastField = getFieldIndex(type, -1, -1);
+  fields.push_back(MemRefType::get({lastField - 2}, indexType));
   // Per-dimension storage.
   for (unsigned r = 0; r < rank; r++) {
     // Dimension level types apply in order to the reordered dimension.
@@ -162,46 +204,10 @@ convertSparseTensorType(Type type, SmallVectorImpl<Type> &fields) {
   }
   // The values array.
   fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, eltType));
+  assert(fields.size() == lastField);
   return success();
 }
 
-// Returns field index of sparse tensor type for pointers/indices, when set.
-static unsigned getFieldIndex(Type type, unsigned ptrDim, unsigned idxDim) {
-  auto enc = getSparseTensorEncoding(type);
-  assert(enc);
-  RankedTensorType rType = type.cast<RankedTensorType>();
-  unsigned field = 1; // start at DimSizes;
-  unsigned ptr = 0;
-  unsigned idx = 0;
-  for (unsigned r = 0, rank = rType.getShape().size(); r < rank; r++) {
-    switch (enc.getDimLevelType()[r]) {
-    case SparseTensorEncodingAttr::DimLevelType::Dense:
-      break; // no fields
-    case SparseTensorEncodingAttr::DimLevelType::Compressed:
-    case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
-    case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
-    case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
-      if (ptr++ == ptrDim)
-        return field;
-      field++;
-      if (idx++ == idxDim)
-        return field;
-      field++;
-      break;
-    case SparseTensorEncodingAttr::DimLevelType::Singleton:
-    case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
-    case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
-    case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
-      if (idx++ == idxDim)
-        return field;
-      field++;
-      break;
-    }
-  }
-  llvm_unreachable("failed to find ptr/idx field index");
-  return -1;
-}
-
 /// Create allocation operation.
 static Value createAllocation(OpBuilder &builder, Location loc, Type type,
                               Value sz) {
@@ -209,11 +215,12 @@ static Value createAllocation(OpBuilder &builder, Location loc, Type type,
   return builder.create<memref::AllocOp>(loc, memType, sz);
 }
 
-/// Creates allocation for each field in sparse tensor type.
+/// Creates allocation for each field in sparse tensor type. Note that
+/// for all dynamic memrefs, the memory size is really the capacity of
+/// the "vector", while the actual size resides in the sizes array.
 ///
 /// TODO: for efficiency, we will need heuristis to make educated guesses
-///       on the required final sizes; also, we will need an improved
-///       memory allocation scheme with capacity and reallocation
+///       on the required capacities
 ///
 static void createAllocFields(OpBuilder &builder, Location loc, Type type,
                               ValueRange dynSizes,
@@ -246,6 +253,11 @@ static void createAllocFields(OpBuilder &builder, Location loc, Type type,
   Value dimSizes =
       builder.create<memref::AllocOp>(loc, MemRefType::get({rank}, indexType));
   fields.push_back(dimSizes);
+  // The sizes array.
+  unsigned lastField = getFieldIndex(type, -1, -1);
+  Value memSizes = builder.create<memref::AllocOp>(
+      loc, MemRefType::get({lastField - 2}, indexType));
+  fields.push_back(memSizes);
   // Per-dimension storage.
   for (unsigned r = 0; r < rank; r++) {
     // Get the original dimension (ro) for the current stored dimension.
@@ -278,6 +290,16 @@ static void createAllocFields(OpBuilder &builder, Location loc, Type type,
   // In all other case, we resort to the heuristical initial value.
   Value valuesSz = allDense ? linear : heuristic;
   fields.push_back(createAllocation(builder, loc, eltType, valuesSz));
+  // Set memSizes.
+  if (allDense)
+    builder.create<memref::StoreOp>(
+        loc, valuesSz, memSizes,
+        constantIndex(builder, loc, 0)); // TODO: avoid memSizes in this case?
+  else
+    builder.create<linalg::FillOp>(
+        loc, ValueRange{constantZero(builder, loc, indexType)},
+        ValueRange{memSizes});
+  assert(fields.size() == lastField);
 }
 
 //===----------------------------------------------------------------------===//
@@ -467,28 +489,6 @@ class SparseTensorLoadConverter : public OpConversionPattern<LoadOp> {
   }
 };
 
-/// Base class for getter-like operations, e.g., to_indices, to_pointers.
-template <typename SourceOp, typename Base>
-class SparseGetterOpConverter : public OpConversionPattern<SourceOp> {
-public:
-  using OpAdaptor = typename SourceOp::Adaptor;
-  using OpConversionPattern<SourceOp>::OpConversionPattern;
-  LogicalResult
-  matchAndRewrite(SourceOp op, OpAdaptor adaptor,
-                  ConversionPatternRewriter &rewriter) const override {
-    // Replace the requested pointer access with corresponding field.
-    // The cast_op is inserted by type converter to intermix 1:N type
-    // conversion.
-    auto tuple = llvm::cast<UnrealizedConversionCastOp>(
-        adaptor.getTensor().getDefiningOp());
-    unsigned idx = Base::getIndexForOp(tuple, op);
-    auto fields = tuple.getInputs();
-    assert(idx < fields.size());
-    rewriter.replaceOp(op, fields[idx]);
-    return success();
-  }
-};
-
 /// Sparse codegen rule for the expand op.
 class SparseExpandConverter : public OpConversionPattern<ExpandOp> {
 public:
@@ -543,6 +543,28 @@ class SparseExpandConverter : public OpConversionPattern<ExpandOp> {
   }
 };
 
+/// Base class for getter-like operations, e.g., to_indices, to_pointers.
+template <typename SourceOp, typename Base>
+class SparseGetterOpConverter : public OpConversionPattern<SourceOp> {
+public:
+  using OpAdaptor = typename SourceOp::Adaptor;
+  using OpConversionPattern<SourceOp>::OpConversionPattern;
+  LogicalResult
+  matchAndRewrite(SourceOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    // Replace the requested pointer access with corresponding field.
+    // The cast_op is inserted by type converter to intermix 1:N type
+    // conversion.
+    auto tuple = llvm::cast<UnrealizedConversionCastOp>(
+        adaptor.getTensor().getDefiningOp());
+    unsigned idx = Base::getIndexForOp(tuple, op);
+    auto fields = tuple.getInputs();
+    assert(idx < fields.size());
+    rewriter.replaceOp(op, fields[idx]);
+    return success();
+  }
+};
+
 /// Sparse codegen rule for pointer accesses.
 class SparseToPointersConverter
     : public SparseGetterOpConverter<ToPointersOp, SparseToPointersConverter> {
@@ -602,9 +624,9 @@ mlir::SparseTensorTypeToBufferConverter::SparseTensorTypeToBufferConverter() {
 void mlir::populateSparseTensorCodegenPatterns(TypeConverter &typeConverter,
                                                RewritePatternSet &patterns) {
   patterns.add<SparseReturnConverter, SparseCallConverter, SparseDimOpConverter,
-               SparseCastConverter, SparseExpandConverter,
-               SparseTensorAllocConverter, SparseTensorDeallocConverter,
-               SparseToPointersConverter, SparseToIndicesConverter,
-               SparseToValuesConverter, SparseTensorLoadConverter>(
+               SparseCastConverter, SparseTensorAllocConverter,
+               SparseTensorDeallocConverter, SparseTensorLoadConverter,
+               SparseExpandConverter, SparseToPointersConverter,
+               SparseToIndicesConverter, SparseToValuesConverter>(
       typeConverter, patterns.getContext());
 }
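
For reference, a sketch (derived from the new getFieldIndex above and the
DCSR cases in the updated test, not an excerpt from the patch) of how the
fields and the memSizes entries line up for a doubly compressed matrix;
the per-entry ordering of memSizes is assumed to follow the field order
of the dynamic memrefs:

  field 0: memref<2xindex>  dimSizes     ; dimension sizes
  field 1: memref<5xindex>  memSizes     ; in-use sizes of the five dynamic memrefs below
  field 2: memref<?xi32>    pointers(0)
  field 3: memref<?xi64>    indices(0)
  field 4: memref<?xi32>    pointers(1)
  field 5: memref<?xi64>    indices(1)
  field 6: memref<?xf64>    values

getFieldIndex starts counting at 2 to skip dimSizes and memSizes, so
getFieldIndex(type, -1, -1) - 2 gives the number of dynamic memrefs,
which is exactly the static size used when allocating the memSizes
array (memref<5xindex> here, memref<3xindex> for CSR).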

diff  --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index a2bd75429d484..6a8a3ca7a56a5 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -42,24 +42,27 @@
 
 // CHECK-LABEL: func @sparse_nop(
 //  CHECK-SAME: %[[A0:.*0]]: memref<1xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xf64>)
-//       CHECK: return %[[A0]], %[[A1]], %[[A2]], %[[A3]] : memref<1xindex>, memref<?xi32>, memref<?xi64>, memref<?xf64>
+//  CHECK-SAME: %[[A1:.*1]]: memref<3xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xf64>)
+//       CHECK: return %[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]] : memref<1xindex>, memref<3xindex>, memref<?xi32>, memref<?xi64>, memref<?xf64>
 func.func @sparse_nop(%arg0: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
   return %arg0 : tensor<?xf64, #SparseVector>
 }
 
 // CHECK-LABEL: func @sparse_nop_multi_ret(
 //  CHECK-SAME: %[[A0:.*0]]: memref<1xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xf64>,
-//  CHECK-SAME: %[[A4:.*4]]: memref<1xindex>,
-//  CHECK-SAME: %[[A5:.*5]]: memref<?xi32>,
-//  CHECK-SAME: %[[A6:.*6]]: memref<?xi64>,
-//  CHECK-SAME: %[[A7:.*7]]: memref<?xf64>) ->
-//       CHECK: return %[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A6]], %[[A7]]
+//  CHECK-SAME: %[[A1:.*1]]: memref<3xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xf64>,
+//  CHECK-SAME: %[[A5:.*5]]: memref<1xindex>,
+//  CHECK-SAME: %[[A6:.*6]]: memref<3xindex>,
+//  CHECK-SAME: %[[A7:.*7]]: memref<?xi32>,
+//  CHECK-SAME: %[[A8:.*8]]: memref<?xi64>,
+//  CHECK-SAME: %[[A9:.*9]]: memref<?xf64>) ->
+//       CHECK: return %[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A6]], %[[A7]], %[[A8]], %[[A9]]
 func.func @sparse_nop_multi_ret(%arg0: tensor<?xf64, #SparseVector>,
                                 %arg1: tensor<?xf64, #SparseVector>) ->
                                 (tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>) {
@@ -68,15 +71,17 @@ func.func @sparse_nop_multi_ret(%arg0: tensor<?xf64, #SparseVector>,
 
 // CHECK-LABEL: func @sparse_nop_call(
 //  CHECK-SAME: %[[A0:.*0]]: memref<1xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xf64>,
-//  CHECK-SAME: %[[A4:.*4]]: memref<1xindex>,
-//  CHECK-SAME: %[[A5:.*5]]: memref<?xi32>,
-//  CHECK-SAME: %[[A6:.*6]]: memref<?xi64>,
-//  CHECK-SAME: %[[A7:.*7]]: memref<?xf64>) 
-//       CHECK: %[[T0:.*]]:8 = call @sparse_nop_multi_ret(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A6]], %[[A7]]) 
-//       CHECK: return %[[T0]]#0, %[[T0]]#1, %[[T0]]#2, %[[T0]]#3, %[[T0]]#4, %[[T0]]#5, %[[T0]]#6, %[[T0]]#7 
+//  CHECK-SAME: %[[A1:.*1]]: memref<3xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xf64>,
+//  CHECK-SAME: %[[A5:.*5]]: memref<1xindex>,
+//  CHECK-SAME: %[[A6:.*6]]: memref<3xindex>,
+//  CHECK-SAME: %[[A7:.*7]]: memref<?xi32>,
+//  CHECK-SAME: %[[A8:.*8]]: memref<?xi64>,
+//  CHECK-SAME: %[[A9:.*9]]: memref<?xf64>)
+//       CHECK: %[[T0:.*]]:10 = call @sparse_nop_multi_ret(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A6]], %[[A7]], %[[A8]], %[[A9]])
+//       CHECK: return %[[T0]]#0, %[[T0]]#1, %[[T0]]#2, %[[T0]]#3, %[[T0]]#4, %[[T0]]#5, %[[T0]]#6, %[[T0]]#7, %[[T0]]#8, %[[T0]]#9
 func.func @sparse_nop_call(%arg0: tensor<?xf64, #SparseVector>,
                            %arg1: tensor<?xf64, #SparseVector>) ->
                            (tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>) {
@@ -86,67 +91,67 @@ func.func @sparse_nop_call(%arg0: tensor<?xf64, #SparseVector>,
   return %1, %2: tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>
 }
 
-//
 // CHECK-LABEL: func @sparse_nop_cast(
 //  CHECK-SAME: %[[A0:.*0]]: memref<1xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xf32>)
-//       CHECK: return %[[A0]], %[[A1]], %[[A2]], %[[A3]] : memref<1xindex>, memref<?xi32>, memref<?xi64>, memref<?xf32>
+//  CHECK-SAME: %[[A1:.*1]]: memref<3xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xf32>)
+//       CHECK: return %[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]] : memref<1xindex>, memref<3xindex>, memref<?xi32>, memref<?xi64>, memref<?xf32>
 func.func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
   %0 = tensor.cast %arg0 : tensor<64xf32, #SparseVector> to tensor<?xf32, #SparseVector>
   return %0 : tensor<?xf32, #SparseVector>
 }
 
-//
 // CHECK-LABEL: func @sparse_nop_cast_3d(
 //  CHECK-SAME: %[[A0:.*0]]: memref<3xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xf32>)
-//       CHECK: return %[[A0]], %[[A1]] : memref<3xindex>, memref<?xf32>
+//  CHECK-SAME: %[[A1:.*1]]: memref<1xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xf32>)
+//       CHECK: return %[[A0]], %[[A1]], %[[A2]] : memref<3xindex>, memref<1xindex>, memref<?xf32>
 func.func @sparse_nop_cast_3d(%arg0: tensor<10x20x30xf32, #Dense3D>) -> tensor<?x?x?xf32, #Dense3D> {
   %0 = tensor.cast %arg0 : tensor<10x20x30xf32, #Dense3D> to tensor<?x?x?xf32, #Dense3D>
   return %0 : tensor<?x?x?xf32, #Dense3D>
 }
 
-//
 // CHECK-LABEL: func @sparse_dense_2d(
 //  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xf64>) {
+//  CHECK-SAME: %[[A1:.*1]]: memref<1xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xf64>)
 //       CHECK: return
 func.func @sparse_dense_2d(%arg0: tensor<?x?xf64, #Dense2D>) {
   return
 }
 
-//
 // CHECK-LABEL: func @sparse_row(
 //  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xf64>) {
+//  CHECK-SAME: %[[A1:.*1]]: memref<3xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xf64>)
 //       CHECK: return
 func.func @sparse_row(%arg0: tensor<?x?xf64, #Row>) {
   return
 }
 
-//
 // CHECK-LABEL: func @sparse_csr(
 //  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xf64>) {
+//  CHECK-SAME: %[[A1:.*1]]: memref<3xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xf64>)
 //       CHECK: return
 func.func @sparse_csr(%arg0: tensor<?x?xf64, #CSR>) {
   return
 }
 
-//
 // CHECK-LABEL: func @sparse_dcsr(
 //  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xi32>,
-//  CHECK-SAME: %[[A4:.*4]]: memref<?xi64>,
-//  CHECK-SAME: %[[A5:.*5]]: memref<?xf64>) {
+//  CHECK-SAME: %[[A1:.*1]]: memref<5xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xi32>,
+//  CHECK-SAME: %[[A5:.*5]]: memref<?xi64>,
+//  CHECK-SAME: %[[A6:.*6]]: memref<?xf64>)
 //       CHECK: return
 func.func @sparse_dcsr(%arg0: tensor<?x?xf64, #DCSR>) {
   return
@@ -156,10 +161,10 @@ func.func @sparse_dcsr(%arg0: tensor<?x?xf64, #DCSR>) {
 // Querying for dimension 1 in the tensor type can immediately
 // fold using the original static dimension sizes.
 //
-//
 // CHECK-LABEL: func @sparse_dense_3d(
 //  CHECK-SAME: %[[A0:.*0]]: memref<3xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xf64>)
+//  CHECK-SAME: %[[A1:.*1]]: memref<1xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xf64>)
 //       CHECK: %[[C:.*]] = arith.constant 20 : index
 //       CHECK: return %[[C]] : index
 func.func @sparse_dense_3d(%arg0: tensor<10x20x30xf64, #Dense3D>) -> index {
@@ -173,10 +178,10 @@ func.func @sparse_dense_3d(%arg0: tensor<10x20x30xf64, #Dense3D>) -> index {
 // into querying for dimension 2 in the stored sparse tensor scheme,
 // since the latter honors the dimOrdering.
 //
-//
 // CHECK-LABEL: func @sparse_dense_3d_dyn(
 //  CHECK-SAME: %[[A0:.*0]]: memref<3xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xf64>)
+//  CHECK-SAME: %[[A1:.*1]]: memref<1xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xf64>)
 //       CHECK: %[[C:.*]] = arith.constant 2 : index
 //       CHECK: %[[L:.*]] = memref.load %[[A0]][%[[C]]] : memref<3xindex>
 //       CHECK: return %[[L]] : index
@@ -186,115 +191,121 @@ func.func @sparse_dense_3d_dyn(%arg0: tensor<?x?x?xf64, #Dense3D>) -> index {
   return %0 : index
 }
 
-//
 // CHECK-LABEL: func @sparse_pointers_dcsr(
 //  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xi32>,
-//  CHECK-SAME: %[[A4:.*4]]: memref<?xi64>,
-//  CHECK-SAME: %[[A5:.*5]]: memref<?xf64>)
-//       CHECK: return %[[A3]] : memref<?xi32>
+//  CHECK-SAME: %[[A1:.*1]]: memref<5xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xi32>,
+//  CHECK-SAME: %[[A5:.*5]]: memref<?xi64>,
+//  CHECK-SAME: %[[A6:.*6]]: memref<?xf64>)
+//       CHECK: return %[[A4]] : memref<?xi32>
 func.func @sparse_pointers_dcsr(%arg0: tensor<?x?xf64, #DCSR>) -> memref<?xi32> {
   %0 = sparse_tensor.pointers %arg0 { dimension = 1 : index } : tensor<?x?xf64, #DCSR> to memref<?xi32>
   return %0 : memref<?xi32>
 }
 
-//
 // CHECK-LABEL: func @sparse_indices_dcsr(
 //  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xi32>,
-//  CHECK-SAME: %[[A4:.*4]]: memref<?xi64>,
-//  CHECK-SAME: %[[A5:.*5]]: memref<?xf64>)
-//       CHECK: return %[[A4]] : memref<?xi64>
+//  CHECK-SAME: %[[A1:.*1]]: memref<5xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xi32>,
+//  CHECK-SAME: %[[A5:.*5]]: memref<?xi64>,
+//  CHECK-SAME: %[[A6:.*6]]: memref<?xf64>)
+//       CHECK: return %[[A5]] : memref<?xi64>
 func.func @sparse_indices_dcsr(%arg0: tensor<?x?xf64, #DCSR>) -> memref<?xi64> {
   %0 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<?x?xf64, #DCSR> to memref<?xi64>
   return %0 : memref<?xi64>
 }
 
-//
 // CHECK-LABEL: func @sparse_values_dcsr(
 //  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xi32>,
-//  CHECK-SAME: %[[A4:.*4]]: memref<?xi64>,
-//  CHECK-SAME: %[[A5:.*5]]: memref<?xf64>)
-//       CHECK: return %[[A5]] : memref<?xf64>
+//  CHECK-SAME: %[[A1:.*1]]: memref<5xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xi32>,
+//  CHECK-SAME: %[[A5:.*5]]: memref<?xi64>,
+//  CHECK-SAME: %[[A6:.*6]]: memref<?xf64>)
+//       CHECK: return %[[A6]] : memref<?xf64>
 func.func @sparse_values_dcsr(%arg0: tensor<?x?xf64, #DCSR>) -> memref<?xf64> {
   %0 = sparse_tensor.values %arg0 : tensor<?x?xf64, #DCSR> to memref<?xf64>
   return %0 : memref<?xf64>
 }
 
-//
 // CHECK-LABEL: func @sparse_dealloc_csr(
 //  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
-//  CHECK-SAME: %[[A1:.*1]]: memref<?xi32>,
-//  CHECK-SAME: %[[A2:.*2]]: memref<?xi64>,
-//  CHECK-SAME: %[[A3:.*3]]: memref<?xf64>) {
+//  CHECK-SAME: %[[A1:.*1]]: memref<3xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xi32>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xi64>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xf64>)
 //       CHECK: memref.dealloc %[[A0]] : memref<2xindex>
-//       CHECK: memref.dealloc %[[A1]] : memref<?xi32>
-//       CHECK: memref.dealloc %[[A2]] : memref<?xi64>
-//       CHECK: memref.dealloc %[[A3]] : memref<?xf64>
+//       CHECK: memref.dealloc %[[A1]] : memref<3xindex>
+//       CHECK: memref.dealloc %[[A2]] : memref<?xi32>
+//       CHECK: memref.dealloc %[[A3]] : memref<?xi64>
+//       CHECK: memref.dealloc %[[A4]] : memref<?xf64>
 //       CHECK: return
 func.func @sparse_dealloc_csr(%arg0: tensor<?x?xf64, #CSR>) {
   bufferization.dealloc_tensor %arg0 : tensor<?x?xf64, #CSR>
   return
 }
 
-//        CHECK-LABEL: func @sparse_alloc_csc(
-//         CHECK-SAME: %[[A:.*]]: index) ->
-//         CHECK-SAME: memref<2xindex>, memref<?xindex>, memref<?xindex>, memref<?xf64>
-//          CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-//          CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-//          CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
-//              CHECK: %[[T0:.*]] = memref.alloc() : memref<2xindex>
-//              CHECK: memref.store %[[A]], %[[T0]][%[[C0]]] : memref<2xindex>
-//              CHECK: memref.store %[[C10]], %[[T0]][%[[C1]]] : memref<2xindex>
-//              CHECK: %[[T1:.*]] = memref.alloc() : memref<1xindex>
-//              CHECK: %[[T2:.*]] = memref.cast %[[T1]] : memref<1xindex> to memref<?xindex>
-//              CHECK: %[[T3:.*]] = memref.alloc() : memref<1xindex>
-//              CHECK: %[[T4:.*]] = memref.cast %[[T3]] : memref<1xindex> to memref<?xindex>
-//              CHECK: %[[T5:.*]] = memref.alloc() : memref<1xf64>
-//              CHECK: %[[T6:.*]] = memref.cast %[[T5]] : memref<1xf64> to memref<?xf64>
-//              CHECK: return %[[T0]], %[[T2]], %[[T4]], %[[T6]]
+// CHECK-LABEL: func @sparse_alloc_csc(
+//  CHECK-SAME: %[[A:.*]]: index) ->
+//  CHECK-SAME: memref<2xindex>, memref<3xindex>, memref<?xindex>, memref<?xindex>, memref<?xf64>
+//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+//   CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
+//       CHECK: %[[T0:.*]] = memref.alloc() : memref<2xindex>
+//       CHECK: %[[T1:.*]] = memref.alloc() : memref<3xindex>
+//       CHECK: memref.store %[[A]], %[[T0]][%[[C0]]] : memref<2xindex>
+//       CHECK: memref.store %[[C10]], %[[T0]][%[[C1]]] : memref<2xindex>
+//       CHECK: %[[T2:.*]] = memref.alloc() : memref<1xindex>
+//       CHECK: %[[T3:.*]] = memref.cast %[[T2]] : memref<1xindex> to memref<?xindex>
+//       CHECK: %[[T4:.*]] = memref.alloc() : memref<1xindex>
+//       CHECK: %[[T5:.*]] = memref.cast %[[T4]] : memref<1xindex> to memref<?xindex>
+//       CHECK: %[[T6:.*]] = memref.alloc() : memref<1xf64>
+//       CHECK: %[[T7:.*]] = memref.cast %[[T6]] : memref<1xf64> to memref<?xf64>
+//       CHECK: linalg.fill ins(%[[C0]] : index) outs(%[[T1]] : memref<3xindex>)
+//       CHECK: return %[[T0]], %[[T1]], %[[T3]], %[[T5]], %[[T7]]
 func.func @sparse_alloc_csc(%arg0: index) -> tensor<10x?xf64, #CSC> {
   %0 = bufferization.alloc_tensor(%arg0) : tensor<10x?xf64, #CSC>
   %1 = sparse_tensor.load %0 : tensor<10x?xf64, #CSC>
   return %1 : tensor<10x?xf64, #CSC>
 }
 
-//        CHECK-LABEL: func @sparse_alloc_3d() ->
-//         CHECK-SAME: memref<3xindex>, memref<?xf64>
-//          CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-//          CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-//          CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-//          CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
-//          CHECK-DAG: %[[C20:.*]] = arith.constant 20 : index
-//          CHECK-DAG: %[[C30:.*]] = arith.constant 30 : index
-//              CHECK: %[[A0:.*]] = memref.alloc() : memref<3xindex>
-//              CHECK: memref.store %[[C30]], %[[A0]][%[[C0]]] : memref<3xindex>
-//              CHECK: memref.store %[[C10]], %[[A0]][%[[C1]]] : memref<3xindex>
-//              CHECK: memref.store %[[C20]], %[[A0]][%[[C2]]] : memref<3xindex>
-//              CHECK: %[[A:.*]] = memref.alloc() : memref<6000xf64>
-//              CHECK: %[[A1:.*]] = memref.cast %[[A]] : memref<6000xf64> to memref<?xf64>
-//              CHECK: return %[[A0]], %[[A1]] : memref<3xindex>, memref<?xf64>
+// CHECK-LABEL: func @sparse_alloc_3d() ->
+//  CHECK-SAME: memref<3xindex>, memref<1xindex>, memref<?xf64>
+//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+//   CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+//   CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
+//   CHECK-DAG: %[[C20:.*]] = arith.constant 20 : index
+//   CHECK-DAG: %[[C30:.*]] = arith.constant 30 : index
+//   CHECK-DAG: %[[C6000:.*]] = arith.constant 6000 : index
+//       CHECK: %[[A0:.*]] = memref.alloc() : memref<3xindex>
+//       CHECK: %[[A1:.*]] = memref.alloc() : memref<1xindex>
+//       CHECK: memref.store %[[C30]], %[[A0]][%[[C0]]] : memref<3xindex>
+//       CHECK: memref.store %[[C10]], %[[A0]][%[[C1]]] : memref<3xindex>
+//       CHECK: memref.store %[[C20]], %[[A0]][%[[C2]]] : memref<3xindex>
+//       CHECK: %[[A:.*]] = memref.alloc() : memref<6000xf64>
+//       CHECK: %[[A2:.*]] = memref.cast %[[A]] : memref<6000xf64> to memref<?xf64>
+//       CHECK: memref.store %[[C6000]], %[[A1]][%[[C0]]] : memref<1xindex>
+//       CHECK: return %[[A0]], %[[A1]], %[[A2]] : memref<3xindex>, memref<1xindex>, memref<?xf64>
 func.func @sparse_alloc_3d() -> tensor<10x20x30xf64, #Dense3D> {
   %0 = bufferization.alloc_tensor() : tensor<10x20x30xf64, #Dense3D>
   %1 = sparse_tensor.load %0 : tensor<10x20x30xf64, #Dense3D>
   return %1 : tensor<10x20x30xf64, #Dense3D>
 }
 
-//   CHECK-LABEL: func.func @sparse_expansion1()
-//         CHECK: %[[A:.*]] = memref.alloc() : memref<8xf64>
-//         CHECK: %[[B:.*]] = memref.alloc() : memref<8xi1>
-//         CHECK: %[[C:.*]] = memref.alloc() : memref<8xindex>
-//         CHECK: %[[D:.*]] = memref.cast %[[C]] : memref<8xindex> to memref<?xindex>
-//     CHECK-DAG: linalg.fill ins(%{{.*}}  : f64) outs(%[[A]] : memref<8xf64>)
-//     CHECK-DAG: linalg.fill ins(%{{.*}}  : i1) outs(%[[B]] : memref<8xi1>)
-//         CHECK: return %[[D]] : memref<?xindex>
+// CHECK-LABEL: func.func @sparse_expansion1()
+//       CHECK: %[[A:.*]] = memref.alloc() : memref<8xf64>
+//       CHECK: %[[B:.*]] = memref.alloc() : memref<8xi1>
+//       CHECK: %[[C:.*]] = memref.alloc() : memref<8xindex>
+//       CHECK: %[[D:.*]] = memref.cast %[[C]] : memref<8xindex> to memref<?xindex>
+//   CHECK-DAG: linalg.fill ins(%{{.*}}  : f64) outs(%[[A]] : memref<8xf64>)
+//   CHECK-DAG: linalg.fill ins(%{{.*}}  : i1) outs(%[[B]] : memref<8xi1>)
+//       CHECK: return %[[D]] : memref<?xindex>
 func.func @sparse_expansion1() -> memref<?xindex> {
   %0 = bufferization.alloc_tensor() : tensor<4x8xf64, #CSR>
   %values, %filled, %added, %count = sparse_tensor.expand %0
@@ -302,14 +313,14 @@ func.func @sparse_expansion1() -> memref<?xindex> {
   return %added : memref<?xindex>
 }
 
-//   CHECK-LABEL: func.func @sparse_expansion2()
-//         CHECK: %[[A:.*]] = memref.alloc() : memref<4xf64>
-//         CHECK: %[[B:.*]] = memref.alloc() : memref<4xi1>
-//         CHECK: %[[C:.*]] = memref.alloc() : memref<4xindex>
-//         CHECK: %[[D:.*]] = memref.cast %[[C]] : memref<4xindex> to memref<?xindex>
-//     CHECK-DAG: linalg.fill ins(%{{.*}}  : f64) outs(%[[A]] : memref<4xf64>)
-//     CHECK-DAG: linalg.fill ins(%{{.*}}  : i1) outs(%[[B]] : memref<4xi1>)
-//         CHECK: return %[[D]] : memref<?xindex>
+// CHECK-LABEL: func.func @sparse_expansion2()
+//       CHECK: %[[A:.*]] = memref.alloc() : memref<4xf64>
+//       CHECK: %[[B:.*]] = memref.alloc() : memref<4xi1>
+//       CHECK: %[[C:.*]] = memref.alloc() : memref<4xindex>
+//       CHECK: %[[D:.*]] = memref.cast %[[C]] : memref<4xindex> to memref<?xindex>
+//   CHECK-DAG: linalg.fill ins(%{{.*}}  : f64) outs(%[[A]] : memref<4xf64>)
+//   CHECK-DAG: linalg.fill ins(%{{.*}}  : i1) outs(%[[B]] : memref<4xi1>)
+//       CHECK: return %[[D]] : memref<?xindex>
 func.func @sparse_expansion2() -> memref<?xindex> {
   %0 = bufferization.alloc_tensor() : tensor<4x8xf64, #CSC>
   %values, %filled, %added, %count = sparse_tensor.expand %0
@@ -317,19 +328,19 @@ func.func @sparse_expansion2() -> memref<?xindex> {
   return %added : memref<?xindex>
 }
 
-//   CHECK-LABEL: func.func @sparse_expansion3(
-//    CHECK-SAME: %[[D0:.*]]: index,
-//    CHECK-SAME: %{{.*}}: index) -> memref<?xindex> {
-//         CHECK: %[[C1:.*]] = arith.constant 1 : index
-//         CHECK: %[[S0:.*]] = memref.alloc() : memref<2xindex>
-//         CHECK: memref.store %[[D0]], %[[S0]]{{\[}}%[[C1]]] : memref<2xindex>
-//         CHECK: %[[D1:.*]] = memref.load %[[S0]]{{\[}}%[[C1]]] : memref<2xindex>
-//         CHECK: %[[V:.*]] = memref.alloc(%[[D1]]) : memref<?xf64>
-//         CHECK: %[[B:.*]] = memref.alloc(%[[D1]]) : memref<?xi1>
-//         CHECK: %[[D:.*]] = memref.alloc(%[[D1]]) : memref<?xindex>
-//         CHECK: linalg.fill ins(%{{.*}} : f64) outs(%[[V]] : memref<?xf64>)
-//         CHECK: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<?xi1>)
-//         CHECK: return %[[D]] : memref<?xindex>
+// CHECK-LABEL: func.func @sparse_expansion3(
+//  CHECK-SAME: %[[D0:.*]]: index,
+//  CHECK-SAME: %{{.*}}: index) -> memref<?xindex> {
+//       CHECK: %[[C1:.*]] = arith.constant 1 : index
+//       CHECK: %[[S0:.*]] = memref.alloc() : memref<2xindex>
+//       CHECK: memref.store %[[D0]], %[[S0]]{{\[}}%[[C1]]] : memref<2xindex>
+//       CHECK: %[[D1:.*]] = memref.load %[[S0]]{{\[}}%[[C1]]] : memref<2xindex>
+//       CHECK: %[[V:.*]] = memref.alloc(%[[D1]]) : memref<?xf64>
+//       CHECK: %[[B:.*]] = memref.alloc(%[[D1]]) : memref<?xi1>
+//       CHECK: %[[D:.*]] = memref.alloc(%[[D1]]) : memref<?xindex>
+//       CHECK: linalg.fill ins(%{{.*}} : f64) outs(%[[V]] : memref<?xf64>)
+//       CHECK: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<?xi1>)
+//       CHECK: return %[[D]] : memref<?xindex>
 func.func @sparse_expansion3(%arg0: index, %arg1: index) -> memref<?xindex> {
   %0 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64, #CSC>
   %values, %filled, %added, %count = sparse_tensor.expand %0


        

