[Mlir-commits] [mlir] 90aa436 - [mlir][sparse] Add layout to the memref for the indices buffers to prepare for the AOS storage optimization for COO regions.

llvmlistbot at llvm.org
Wed Jan 4 07:36:16 PST 2023


Author: bixia1
Date: 2023-01-04T07:36:11-08:00
New Revision: 90aa4362913d4afbf24b930c261a3578cb793e96

URL: https://github.com/llvm/llvm-project/commit/90aa4362913d4afbf24b930c261a3578cb793e96
DIFF: https://github.com/llvm/llvm-project/commit/90aa4362913d4afbf24b930c261a3578cb793e96.diff

LOG: [mlir][sparse] Add layout to the memref for the indices buffers to prepare for the AOS storage optimization for COO regions.

Fix relevant FileCheck tests.

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D140742
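
For context, the effect on the IR is visible in the updated FileCheck tests
below: within a trailing COO region, sparse_tensor.indices now yields a memref
with a dynamic strided layout instead of the default unit-stride layout, so
that a later change can store all indices of the region interleaved in a
single buffer (array-of-structs, AOS) and hand out per-dimension strided
views. A minimal sketch of the new result type, using the #SortedCOO encoding
from the tests (%t is an illustrative operand):

    #SortedCOO = #sparse_tensor.encoding<{
      dimLevelType = [ "compressed-nu", "singleton" ]
    }>

    // Previously: ... to memref<?xindex>
    %i0 = sparse_tensor.indices %t {dimension = 0 : index}
        : tensor<32x64xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>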

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
    mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
    mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
    mlir/test/Dialect/SparseTensor/sorted_coo.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
index dcd6854df44db..c7c0826499091 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
@@ -46,6 +46,11 @@ SparseTensorEncodingAttr getSparseTensorEncoding(Type type);
 /// dimension level type being unique.
 bool isUniqueCOOType(RankedTensorType tp);
 
+/// Returns the starting dimension of a trailing COO region that spans at
+/// least two dimensions. If no such COO region is found, returns the rank
+/// of the tensor.
+unsigned getCOOStart(SparseTensorEncodingAttr enc);
+
 //
 // Dimension level types.
 //
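
To illustrate the intended contract of getCOOStart, a few encodings (the
aliases below are illustrative, not part of this patch):

    #COO  = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>           // start = 0
    #DCOO = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed-nu", "singleton" ] }>  // start = 1
    #CSR  = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>                  // start = 2 (the rank; no COO region)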

diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index ccb4a88453e1b..962353ec1ce05 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -264,22 +264,45 @@ mlir::sparse_tensor::getSparseTensorEncoding(Type type) {
   return nullptr;
 }
 
+/// Returns true iff the given encoding has a trailing COO region starting at
+/// the given dimension; if isUnique is set, the region must also be unique.
+static bool isCOOType(SparseTensorEncodingAttr enc, uint64_t s, bool isUnique) {
+  uint64_t rank = enc.getDimLevelType().size();
+  assert(s < rank && "Dimension out of bounds");
+  if (!isCompressedDim(enc, s))
+    return false;
+
+  for (uint64_t i = s + 1; i < rank; ++i)
+    if (!isSingletonDim(enc, i))
+      return false;
+
+  // If isUnique is true, then make sure that the last dimension level is
+  // unique; that is, for rank == 1 the only compressed dimension must be
+  // unique, and for rank > 1 the last singleton must be unique.
+  return !isUnique || isUniqueDLT(getDimLevelType(enc, rank - 1));
+}
+
 bool mlir::sparse_tensor::isUniqueCOOType(RankedTensorType tp) {
   SparseTensorEncodingAttr enc = getSparseTensorEncoding(tp);
-
   if (!enc)
     return false;
 
-  if (!isCompressedDim(tp, 0))
-    return false;
+  return isCOOType(enc, 0, /*isUnique=*/true);
+}
 
-  for (uint64_t i = 1, e = tp.getRank(); i < e; ++i)
-    if (!isSingletonDim(tp, i))
-      return false;
+unsigned mlir::sparse_tensor::getCOOStart(SparseTensorEncodingAttr enc) {
+  unsigned rank = enc.getDimLevelType().size();
+  if (rank <= 1)
+    return rank;
+
+  // We only consider a COO region that spans at least two dimensions for the
+  // purpose of the AOS storage optimization.
+  for (unsigned r = 0; r < rank - 1; r++) {
+    if (isCOOType(enc, r, /*isUnique=*/false))
+      return r;
+  }
 
-  // This works for rank == 1 (unique the only compressed) and rank > 1 (unique
-  // on the last singleton).
-  return isUniqueDim(tp, tp.getRank() - 1);
+  return rank;
 }
 
 uint64_t mlir::sparse_tensor::toOrigDim(SparseTensorEncodingAttr enc,
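
For the isUnique case, the distinction is carried by the last dimension level;
assuming the non-unique singleton variant is spelled "singleton-nu", the
contract reads:

    [ "compressed-nu", "singleton" ]     // isUniqueCOOType == true
    [ "compressed-nu", "singleton-nu" ]  // isUniqueCOOType == false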

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index 83df60ed097f9..7228a235324f5 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -524,3 +524,31 @@ void sparse_tensor::foreachInSparseConstant(
     callback(coords, val);
   }
 }
+
+Value sparse_tensor::genToPointers(OpBuilder &builder, Location loc,
+                                   Value tensor, uint64_t d) {
+  RankedTensorType srcTp = tensor.getType().cast<RankedTensorType>();
+  SparseTensorEncodingAttr encSrc = getSparseTensorEncoding(srcTp);
+  Type ptrTp = get1DMemRefType(getPointerOverheadType(builder, encSrc),
+                               /*withLayout=*/false);
+  return builder.create<ToPointersOp>(loc, ptrTp, tensor,
+                                      builder.getIndexAttr(d));
+}
+
+Value sparse_tensor::genToIndices(OpBuilder &builder, Location loc,
+                                  Value tensor, uint64_t d, uint64_t cooStart) {
+  RankedTensorType srcTp = tensor.getType().cast<RankedTensorType>();
+  SparseTensorEncodingAttr encSrc = getSparseTensorEncoding(srcTp);
+  Type indTp = get1DMemRefType(getIndexOverheadType(builder, encSrc),
+                               /*withLayout=*/d >= cooStart);
+  return builder.create<ToIndicesOp>(loc, indTp, tensor,
+                                     builder.getIndexAttr(d));
+}
+
+Value sparse_tensor::genToValues(OpBuilder &builder, Location loc,
+                                 Value tensor) {
+  RankedTensorType srcTp = tensor.getType().cast<RankedTensorType>();
+  Type valTp = get1DMemRefType(srcTp.getElementType(),
+                               /*withLayout=*/false);
+  return builder.create<ToValuesOp>(loc, valTp, tensor);
+}
\ No newline at end of file
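
Depending on the withLayout flag, get1DMemRefType (declared in CodegenUtils.h
below) yields one of two memref types:

    memref<?xindex>                            // withLayout == false
    memref<?xindex, strided<[?], offset: ?>>   // withLayout == true

Taken together, for a 2-D sorted-COO operand the three helpers above produce
buffers along these lines (types as in the updated sorted_coo.mlir checks; %t
is an illustrative tensor<32x64xf64, #SortedCOO> value):

    %p = sparse_tensor.pointers %t {dimension = 0 : index}
        : tensor<32x64xf64, #SortedCOO> to memref<?xindex>
    %i = sparse_tensor.indices %t {dimension = 1 : index}
        : tensor<32x64xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
    %v = sparse_tensor.values %t
        : tensor<32x64xf64, #SortedCOO> to memref<?xf64>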

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index da94b27c42d6e..4ec9c25db176e 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -183,6 +183,17 @@ void genDenseTensorOrSparseConstantIterLoop(
 void sizesFromSrc(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                   Location loc, Value src);
 
+/// Generates a 1D MemRefType with a dynamic size. When withLayout is set, the
+/// returned memref has a layout with unknown strides and offset. Otherwise,
+/// a memref with the standard unit-stride, zero-offset layout is returned.
+inline MemRefType get1DMemRefType(Type etp, bool withLayout) {
+  auto layout = withLayout ? StridedLayoutAttr::get(
+                                 etp.getContext(), ShapedType::kDynamic,
+                                 {ShapedType::kDynamic})
+                           : StridedLayoutAttr();
+  return MemRefType::get(ShapedType::kDynamic, etp, layout);
+}
+
 /// Scans to top of generated loop.
 Operation *getTop(Operation *op);
 
@@ -313,6 +324,18 @@ inline bool isZeroRankedTensorOrScalar(Type type) {
   return !rtp || rtp.getRank() == 0;
 }
 
+/// Infers the result type and generates ToPointersOp.
+Value genToPointers(OpBuilder &builder, Location loc, Value tensor, uint64_t d);
+
+/// Infers the result type and generates ToIndicesOp. If the dim is within a
+/// COO region, the result type is a memref with unknown stride and offset;
+/// otherwise, it is a memref with the standard unit-stride, zero-offset layout.
+Value genToIndices(OpBuilder &builder, Location loc, Value tensor, uint64_t d,
+                   uint64_t cooStart);
+
+/// Infers the result type and generates ToValuesOp.
+Value genToValues(OpBuilder &builder, Location loc, Value tensor);
+
 } // namespace sparse_tensor
 } // namespace mlir
 

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
index d77b060d931d6..a9cc10006f4a7 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -124,27 +124,19 @@ void LoopEmitter::initializeLoopEmit(OpBuilder &builder, Location loc,
     auto rank = rtp.getRank();
     auto shape = rtp.getShape();
     auto enc = getSparseTensorEncoding(rtp);
-    auto dynShape = {ShapedType::kDynamic};
+    uint64_t cooStart = enc ? getCOOStart(enc) : rank;
     // Scan all dimensions of current tensor.
     for (int64_t d = 0; d < rank; d++) {
       // This should be called only once at beginning.
       assert(!ptrBuffer[t][d] && !idxBuffer[t][d] && !highs[t][d]);
       // Handle sparse storage schemes.
       if (isCompressedDLT(dimTypes[t][d])) {
-        auto ptrTp =
-            MemRefType::get(dynShape, getPointerOverheadType(builder, enc));
-        auto indTp =
-            MemRefType::get(dynShape, getIndexOverheadType(builder, enc));
-        auto dim = builder.getIndexAttr(d);
         // Generate sparse primitives to obtain pointer and indices.
-        ptrBuffer[t][d] = builder.create<ToPointersOp>(loc, ptrTp, tensor, dim);
-        idxBuffer[t][d] = builder.create<ToIndicesOp>(loc, indTp, tensor, dim);
+        ptrBuffer[t][d] = genToPointers(builder, loc, tensor, d);
+        idxBuffer[t][d] = genToIndices(builder, loc, tensor, d, cooStart);
       } else if (isSingletonDLT(dimTypes[t][d])) {
         // Singleton dimension, fetch indices.
-        auto indTp =
-            MemRefType::get(dynShape, getIndexOverheadType(builder, enc));
-        auto dim = builder.getIndexAttr(d);
-        idxBuffer[t][d] = builder.create<ToIndicesOp>(loc, indTp, tensor, dim);
+        idxBuffer[t][d] = genToIndices(builder, loc, tensor, d, cooStart);
       } else {
         // Dense dimension, nothing to fetch.
         assert(isDenseDLT(dimTypes[t][d]));

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index e3bcfe8117da2..d138b6de7f94e 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -878,60 +878,65 @@ class SparseInsertConverter : public OpConversionPattern<InsertOp> {
   }
 };
 
-/// Base class for getter-like operations, e.g., to_indices, to_pointers.
-template <typename SourceOp, typename Base>
-class SparseGetterOpConverter : public OpConversionPattern<SourceOp> {
+/// Sparse codegen rule for pointer accesses.
+class SparseToPointersConverter : public OpConversionPattern<ToPointersOp> {
 public:
-  using OpAdaptor = typename SourceOp::Adaptor;
-  using OpConversionPattern<SourceOp>::OpConversionPattern;
+  using OpAdaptor = typename ToPointersOp::Adaptor;
+  using OpConversionPattern<ToPointersOp>::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(SourceOp op, OpAdaptor adaptor,
+  matchAndRewrite(ToPointersOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     // Replace the requested pointer access with corresponding field.
     // The cast_op is inserted by type converter to intermix 1:N type
     // conversion.
     auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
-    Value field = Base::getFieldForOp(desc, op);
-    rewriter.replaceOp(op, field);
-    return success();
-  }
-};
-
-/// Sparse codegen rule for pointer accesses.
-class SparseToPointersConverter
-    : public SparseGetterOpConverter<ToPointersOp, SparseToPointersConverter> {
-public:
-  using SparseGetterOpConverter::SparseGetterOpConverter;
-  // Callback for SparseGetterOpConverter.
-  static Value getFieldForOp(const SparseTensorDescriptor &desc,
-                             ToPointersOp op) {
     uint64_t dim = op.getDimension().getZExtValue();
-    return desc.getPtrMemRef(dim);
+    rewriter.replaceOp(op, desc.getPtrMemRef(dim));
+    return success();
   }
 };
 
 /// Sparse codegen rule for index accesses.
-class SparseToIndicesConverter
-    : public SparseGetterOpConverter<ToIndicesOp, SparseToIndicesConverter> {
+class SparseToIndicesConverter : public OpConversionPattern<ToIndicesOp> {
 public:
-  using SparseGetterOpConverter::SparseGetterOpConverter;
-  // Callback for SparseGetterOpConverter.
-  static Value getFieldForOp(const SparseTensorDescriptor &desc,
-                             ToIndicesOp op) {
+  using OpAdaptor = typename ToIndicesOp::Adaptor;
+  using OpConversionPattern<ToIndicesOp>::OpConversionPattern;
+  LogicalResult
+  matchAndRewrite(ToIndicesOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    // Replace the requested index access with the corresponding field.
+    // The cast_op is inserted by type converter to intermix 1:N type
+    // conversion.
+    auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
     uint64_t dim = op.getDimension().getZExtValue();
-    return desc.getIdxMemRef(dim);
+    Value field = desc.getIdxMemRef(dim);
+
+    // Insert a cast to bridge the actual type and the type expected by the
+    // user. If the two types aren't compatible, the compiler or the runtime
+    // will issue an error.
+    Type resType = op.getResult().getType();
+    if (resType != field.getType())
+      field = rewriter.create<memref::CastOp>(op.getLoc(), resType, field);
+    rewriter.replaceOp(op, field);
+
+    return success();
   }
 };
 
 /// Sparse codegen rule for value accesses.
-class SparseToValuesConverter
-    : public SparseGetterOpConverter<ToValuesOp, SparseToValuesConverter> {
+class SparseToValuesConverter : public OpConversionPattern<ToValuesOp> {
 public:
-  using SparseGetterOpConverter::SparseGetterOpConverter;
-  // Callback for SparseGetterOpConverter.
-  static Value getFieldForOp(const SparseTensorDescriptor &desc,
-                             ToValuesOp /*op*/) {
-    return desc.getValMemRef();
+  using OpAdaptor = typename ToValuesOp::Adaptor;
+  using OpConversionPattern<ToValuesOp>::OpConversionPattern;
+  LogicalResult
+  matchAndRewrite(ToValuesOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    // Replace the requested value access with the corresponding field.
+    // The cast_op is inserted by type converter to intermix 1:N type
+    // conversion.
+    auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
+    rewriter.replaceOp(op, desc.getValMemRef());
+    return success();
   }
 };
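
When the descriptor field still carries the default layout, the bridging cast
inserted by the ToIndices rule above amounts to (a sketch; the %field name is
illustrative):

    %0 = memref.cast %field
        : memref<?xindex> to memref<?xindex, strided<[?], offset: ?>>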
 

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 1d5d28ca512af..bf61164682c88 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -1122,10 +1122,24 @@ class SparseTensorToIndicesConverter : public OpConversionPattern<ToIndicesOp> {
     Type resType = op.getType();
     Type indType = resType.cast<ShapedType>().getElementType();
     SmallString<15> name{"sparseIndices", overheadTypeFunctionSuffix(indType)};
-    Value dim =
-        constantIndex(rewriter, op->getLoc(), op.getDimension().getZExtValue());
-    replaceOpWithFuncCall(rewriter, op, name, resType,
-                          {adaptor.getTensor(), dim}, EmitCInterface::On);
+    Location loc = op->getLoc();
+    Value dim = constantIndex(rewriter, loc, op.getDimension().getZExtValue());
+
+    // The runtime function returns a MemRef without an explicit layout.
+    MemRefType callRetType = get1DMemRefType(indType, false);
+    SmallVector<Value> operands{adaptor.getTensor(), dim};
+    auto fn = getFunc(op->getParentOfType<ModuleOp>(), name, callRetType,
+                      operands, EmitCInterface::On);
+    Value callRet =
+        rewriter.create<func::CallOp>(loc, callRetType, fn, operands)
+            .getResult(0);
+
+    // Cast the MemRef to the type its users expect; the two types should be
+    // compatible at runtime.
+    if (resType != callRetType)
+      callRet = rewriter.create<memref::CastOp>(loc, resType, callRet);
+    rewriter.replaceOp(op, callRet);
+
     return success();
   }
 };
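
The runtime-library path thus follows the same pattern as the codegen path:
the call returns the layout-free memref, and a cast adapts it to the type its
users expect. Schematically (the callee name is illustrative of the
sparseIndices<suffix> scheme; the handle type may differ):

    %m = call @sparseIndices0(%t, %d) : (!llvm.ptr<i8>, index) -> memref<?xindex>
    %c = memref.cast %m
        : memref<?xindex> to memref<?xindex, strided<[?], offset: ?>>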

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 013b8f17ba49c..348996cbc3faf 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -809,16 +809,14 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
       // Sort the COO tensor so that its elements are ordered via increasing
       // indices for the storage ordering of the dst tensor.
       SparseTensorEncodingAttr encSrc = getSparseTensorEncoding(srcTp);
-      auto dynShape = {ShapedType::kDynamic};
-      auto indTp =
-          MemRefType::get(dynShape, getIndexOverheadType(rewriter, encSrc));
       uint64_t rank = dstTp.getRank();
+      uint64_t cooStart = getCOOStart(encSrc);
       // Gather the indices-arrays in the dst tensor storage order.
       SmallVector<Value> xs(rank, Value());
       for (uint64_t i = 0; i < rank; i++) {
         uint64_t orgDim = toOrigDim(encSrc, i);
-        xs[toStoredDim(encDst, orgDim)] = rewriter.create<ToIndicesOp>(
-            loc, indTp, src, rewriter.getIndexAttr(i));
+        xs[toStoredDim(encDst, orgDim)] =
+            genToIndices(rewriter, loc, src, i, cooStart);
       }
 
       // Retrieve NNZ.
@@ -827,8 +825,7 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
                                                 nnz);
 
       // Retrieve the values-array.
-      auto valTp = MemRefType::get(dynShape, srcTp.getElementType());
-      Value y = rewriter.create<ToValuesOp>(loc, valTp, src);
+      Value y = genToValues(rewriter, loc, src);
 
       // Sort the COO tensor.
       rewriter.create<SortOp>(loc, nnz, xs, ValueRange{y});
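
Since the gathered indices-arrays of a COO source now carry the strided
layout, the emitted sort mixes memref layouts, which is presumably why the
type suffix was dropped from the CHECK lines in convert_dense2sparse.mlir
below. Schematically:

    sparse_tensor.sort %nnz, %i0, %i1 jointly %v
        : memref<?xindex, strided<[?], offset: ?>>,
          memref<?xindex, strided<[?], offset: ?>> jointly memref<?xf64>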

diff --git a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
index 8336a142528bf..436b67661a02f 100644
--- a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
@@ -126,7 +126,7 @@ func.func @sparse_convert_complex(%arg0: tensor<100xcomplex<f64>>) -> tensor<100
 //       CHECK-RWT:     %[[I1:.*]] = sparse_tensor.indices %[[COO]] {dimension = 1 : index}
 //       CHECK-RWT:     %[[NNZ:.*]] = sparse_tensor.number_of_entries %[[COO]]
 //       CHECK-RWT:     %[[V:.*]] = sparse_tensor.values %[[COO]]
-//       CHECK-RWT:     sparse_tensor.sort %[[NNZ]], %[[I0]], %[[I1]] jointly %[[V]] : memref<?xindex>, memref<?xindex> jointly memref<?xf64>
+//       CHECK-RWT:     sparse_tensor.sort %[[NNZ]], %[[I0]], %[[I1]] jointly %[[V]]
 //       CHECK-RWT:     %[[T3:.*]] = bufferization.alloc_tensor()
 //       CHECK-RWT:     %[[T4:.*]] = sparse_tensor.foreach in %[[COO]] init(%[[T3]])
 //       CHECK-RWT:     ^bb0(%[[L1I0:.*]]: index, %[[L1I1:.*]]: index, %[[L1V:.*]]: f64, %[[L1T:.*]]: tensor
@@ -186,7 +186,7 @@ func.func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #CSR> {
 //       CHECK-RWT:     %[[I1:.*]] = sparse_tensor.indices %[[COO]] {dimension = 1 : index}
 //       CHECK-RWT:     %[[NNZ:.*]] = sparse_tensor.number_of_entries %[[COO]]
 //       CHECK-RWT:     %[[V:.*]] = sparse_tensor.values %[[COO]]
-//       CHECK-RWT:     sparse_tensor.sort %[[NNZ]], %[[I0]], %[[I1]] jointly %[[V]] : memref<?xindex>, memref<?xindex> jointly memref<?xf32>
+//       CHECK-RWT:     sparse_tensor.sort %[[NNZ]], %[[I0]], %[[I1]] jointly %[[V]]
 //       CHECK-RWT:     %[[T3:.*]] = bufferization.alloc_tensor()
 //       CHECK-RWT:     %[[T4:.*]] = sparse_tensor.foreach in %[[COO]] init(%[[T3]])
 //       CHECK-RWT:     ^bb0(%[[L1I0:.*]]: index, %[[L1I1:.*]]: index, %[[L1V:.*]]: f32, %[[L1T:.*]]: tensor

diff --git a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir
index 33c260f929f42..c30525f224a68 100644
--- a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir
+++ b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir
@@ -71,17 +71,17 @@ func.func @sparse_scale(%argx: tensor<?x?xf32, #SortedCOO>) -> tensor<?x?xf32, #
 // CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 0 : index
 // CHECK-DAG:     %[[VAL_4:.*]] = arith.constant 1 : index
 // CHECK-DAG:     %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG:     %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG:     %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG:     %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG:     %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
 // CHECK-DAG:     %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
 // CHECK-DAG:     %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
 // CHECK-DAG:     %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
 // CHECK-DAG:     %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
 // CHECK-DAG:     %[[VAL_12:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
 // CHECK:         scf.for %[[VAL_13:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_4]] {
-// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xindex, strided<[?], offset: ?>>
 // CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_14]]] : memref<32xf64>
-// CHECK:           %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK:           %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex, strided<[?], offset: ?>>
 // CHECK:           %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<?xf64>
 // CHECK:           %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_16]]] : memref<64xf64>
 // CHECK:           %[[VAL_19:.*]] = arith.mulf %[[VAL_17]], %[[VAL_18]] : f64
@@ -113,12 +113,12 @@ func.func @matvec(%arga: tensor<32x64xf64, #SortedCOO>,
 // CHECK-DAG:     %[[VAL_4:.*]] = arith.constant 0 : index
 // CHECK-DAG:     %[[VAL_5:.*]] = arith.constant 1 : index
 // CHECK-DAG:     %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG:     %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG:     %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG:     %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG:     %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
 // CHECK-DAG:     %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
 // CHECK-DAG:     %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG:     %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG:     %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG:     %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG:     %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
 // CHECK-DAG:     %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
 // CHECK-DAG:     %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x64xf64>
 // CHECK-DAG:     linalg.fill ins(%[[VAL_3]] : f64) outs(%[[VAL_14]] : memref<32x64xf64>)
@@ -133,8 +133,8 @@ func.func @matvec(%arga: tensor<32x64xf64, #SortedCOO>,
 // CHECK:           scf.condition(%[[VAL_24]]) %[[VAL_20]], %[[VAL_21]] : index, index
 // CHECK:         } do {
 // CHECK:         ^bb0(%[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index):
-// CHECK:           %[[VAL_27:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK:           %[[VAL_28:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xindex>
+// CHECK:           %[[VAL_27:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_25]]] : memref<?xindex, strided<[?], offset: ?>>
+// CHECK:           %[[VAL_28:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xindex, strided<[?], offset: ?>>
 // CHECK:           %[[VAL_29:.*]] = arith.cmpi ult, %[[VAL_28]], %[[VAL_27]] : index
 // CHECK:           %[[VAL_30:.*]] = arith.select %[[VAL_29]], %[[VAL_28]], %[[VAL_27]] : index
 // CHECK:           %[[VAL_31:.*]] = arith.cmpi eq, %[[VAL_27]], %[[VAL_30]] : index
@@ -150,8 +150,8 @@ func.func @matvec(%arga: tensor<32x64xf64, #SortedCOO>,
 // CHECK:               scf.condition(%[[VAL_41]]) %[[VAL_37]], %[[VAL_38]] : index, index
 // CHECK:             } do {
 // CHECK:             ^bb0(%[[VAL_42:.*]]: index, %[[VAL_43:.*]]: index):
-// CHECK:               %[[VAL_44:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_42]]] : memref<?xindex>
-// CHECK:               %[[VAL_45:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_43]]] : memref<?xindex>
+// CHECK:               %[[VAL_44:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_42]]] : memref<?xindex, strided<[?], offset: ?>>
+// CHECK:               %[[VAL_45:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_43]]] : memref<?xindex, strided<[?], offset: ?>>
 // CHECK:               %[[VAL_46:.*]] = arith.cmpi ult, %[[VAL_45]], %[[VAL_44]] : index
 // CHECK:               %[[VAL_47:.*]] = arith.select %[[VAL_46]], %[[VAL_45]], %[[VAL_44]] : index
 // CHECK:               %[[VAL_48:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_47]] : index


        

