[Mlir-commits] [mlir] bd5494d - [mlir][sparse] make index type explicit in public API of support library

Aart Bik llvmlistbot at llvm.org
Wed Oct 20 12:46:40 PDT 2021


Author: Aart Bik
Date: 2021-10-20T12:46:31-07:00
New Revision: bd5494d127a39f619b7d0e5e967f190e0e61d6e7

URL: https://github.com/llvm/llvm-project/commit/bd5494d127a39f619b7d0e5e967f190e0e61d6e7
DIFF: https://github.com/llvm/llvm-project/commit/bd5494d127a39f619b7d0e5e967f190e0e61d6e7.diff

LOG: [mlir][sparse] make index type explicit in public API of support library

The previous implementation used explicit index->int64_t casts for some, but
not all, instances of passing values of type "index" to and from the sparse
support library. This revision makes the situation consistent by using a new
"index_t" type at all such places (which also removes trivial casts from the
generated MLIR code). Note that this revision still assumes that "index" is
64 bits wide. If we want to support targets with a different "index" bit
width, we will need to build the support library differently. But this
revision is a step forward by making that requirement explicit and more
visible.
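
As a minimal sketch of the resulting convention (simplified from the
SparseUtils.cpp changes below; function bodies omitted, only the typedef and
one representative signature are shown):

  #include <cstdint>

  // All values that correspond to the MLIR built-in "index" type cross the
  // public C API of the support library as index_t, which for now is
  // hard-wired to 64 bits.
  typedef uint64_t index_t;

  // Representative public entry point: both the dimension argument and the
  // returned size use index_t instead of ad-hoc int64_t casts (signature as
  // in the patch; the real implementation forwards to the underlying
  // SparseTensorStorageBase instance behind the opaque pointer).
  extern "C" index_t sparseDimSize(void *tensor, index_t d);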

Reviewed By: wrengr

Differential Revision: https://reviews.llvm.org/D112122

Added: 
    

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/lib/ExecutionEngine/SparseUtils.cpp
    mlir/test/Dialect/SparseTensor/conversion.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index ffd852fef7333..db6b8d2548f06 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -45,7 +45,7 @@ enum Action : uint32_t {
 
 /// Returns internal type encoding for primary storage. Keep these
 /// values consistent with the sparse runtime support library.
-static unsigned getPrimaryTypeEncoding(Type tp) {
+static uint32_t getPrimaryTypeEncoding(Type tp) {
   if (tp.isF64())
     return 1;
   if (tp.isF32())
@@ -63,7 +63,7 @@ static unsigned getPrimaryTypeEncoding(Type tp) {
 
 /// Returns internal type encoding for overhead storage. Keep these
 /// values consistent with the sparse runtime support library.
-static unsigned getOverheadTypeEncoding(unsigned width) {
+static uint32_t getOverheadTypeEncoding(unsigned width) {
   switch (width) {
   default:
     return 1;
@@ -78,7 +78,7 @@ static unsigned getOverheadTypeEncoding(unsigned width) {
 
 /// Returns internal dimension level type encoding. Keep these
 /// values consistent with the sparse runtime support library.
-static unsigned
+static uint32_t
 getDimLevelTypeEncoding(SparseTensorEncodingAttr::DimLevelType dlt) {
   switch (dlt) {
   case SparseTensorEncodingAttr::DimLevelType::Dense:
@@ -103,12 +103,6 @@ inline static Value constantIndex(ConversionPatternRewriter &rewriter,
   return rewriter.create<arith::ConstantIndexOp>(loc, i);
 }
 
-/// Generates a constant of `i64` type.
-inline static Value constantI64(ConversionPatternRewriter &rewriter,
-                                Location loc, int64_t i) {
-  return rewriter.create<arith::ConstantIntOp>(loc, i, 64);
-}
-
 /// Generates a constant of `i32` type.
 inline static Value constantI32(ConversionPatternRewriter &rewriter,
                                 Location loc, int32_t i) {
@@ -246,11 +240,9 @@ static void newParams(ConversionPatternRewriter &rewriter,
   params.push_back(genBuffer(rewriter, loc, attrs));
   // Dimension sizes array of the enveloping tensor. Useful for either
   // verification of external data, or for construction of internal data.
-  // The index type is casted to I64 for API consistency.
-  Type iTp = rewriter.getI64Type();
   SmallVector<Value, 4> sizes;
   for (Value s : szs)
-    sizes.push_back(rewriter.create<arith::IndexCastOp>(loc, s, iTp));
+    sizes.push_back(s);
   params.push_back(genBuffer(rewriter, loc, sizes));
   // Dimension order permutation array. This is the "identity" permutation by
   // default, or otherwise the "reverse" permutation of a given ordering, so
@@ -258,21 +250,21 @@ static void newParams(ConversionPatternRewriter &rewriter,
   SmallVector<Value, 4> rev(sz);
   if (AffineMap p = enc.getDimOrdering()) {
     for (unsigned i = 0; i < sz; i++)
-      rev[p.getDimPosition(i)] = constantI64(rewriter, loc, i);
+      rev[p.getDimPosition(i)] = constantIndex(rewriter, loc, i);
   } else {
     for (unsigned i = 0; i < sz; i++)
-      rev[i] = constantI64(rewriter, loc, i);
+      rev[i] = constantIndex(rewriter, loc, i);
   }
   params.push_back(genBuffer(rewriter, loc, rev));
   // Secondary and primary types encoding.
   ShapedType resType = op->getResult(0).getType().cast<ShapedType>();
-  unsigned secPtr = getOverheadTypeEncoding(enc.getPointerBitWidth());
-  unsigned secInd = getOverheadTypeEncoding(enc.getIndexBitWidth());
-  unsigned primary = getPrimaryTypeEncoding(resType.getElementType());
+  uint32_t secPtr = getOverheadTypeEncoding(enc.getPointerBitWidth());
+  uint32_t secInd = getOverheadTypeEncoding(enc.getIndexBitWidth());
+  uint32_t primary = getPrimaryTypeEncoding(resType.getElementType());
   assert(primary);
-  params.push_back(constantI64(rewriter, loc, secPtr));
-  params.push_back(constantI64(rewriter, loc, secInd));
-  params.push_back(constantI64(rewriter, loc, primary));
+  params.push_back(constantI32(rewriter, loc, secPtr));
+  params.push_back(constantI32(rewriter, loc, secInd));
+  params.push_back(constantI32(rewriter, loc, primary));
   // User action and pointer.
   Type pTp = LLVM::LLVMPointerType::get(rewriter.getI8Type());
   if (!ptr)
@@ -608,7 +600,7 @@ class SparseTensorToPointersConverter
     Type eltType = resType.cast<ShapedType>().getElementType();
     StringRef name;
     if (eltType.isIndex())
-      name = "sparsePointers"; // 64-bit, but its own name for unique signature
+      name = "sparsePointers";
     else if (eltType.isInteger(64))
       name = "sparsePointers64";
     else if (eltType.isInteger(32))
@@ -637,7 +629,7 @@ class SparseTensorToIndicesConverter : public OpConversionPattern<ToIndicesOp> {
     Type eltType = resType.cast<ShapedType>().getElementType();
     StringRef name;
     if (eltType.isIndex())
-      name = "sparseIndices"; // 64-bit, but its own name for unique signature
+      name = "sparseIndices";
     else if (eltType.isInteger(64))
       name = "sparseIndices64";
     else if (eltType.isInteger(32))

diff --git a/mlir/lib/ExecutionEngine/SparseUtils.cpp b/mlir/lib/ExecutionEngine/SparseUtils.cpp
index 37c9b13145254..f3a12dbd333f8 100644
--- a/mlir/lib/ExecutionEngine/SparseUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseUtils.cpp
@@ -273,7 +273,7 @@ class SparseTensorStorage : public SparseTensorStorageBase {
     if (tensor) {
       assert(tensor->getRank() == rank);
       for (uint64_t r = 0; r < rank; r++)
-        assert(tensor->getSizes()[perm[r]] == sizes[r] || sizes[r] == 0);
+        assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]);
       tensor->sort(); // sort lexicographically
       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
                                            tensor);
@@ -306,8 +306,8 @@ class SparseTensorStorage : public SparseTensorStorageBase {
     while (lo < hi) {
       assert(lo < elements.size() && hi <= elements.size());
       // Find segment in interval with same index elements in this dimension.
-      unsigned idx = elements[lo].indices[d];
-      unsigned seg = lo + 1;
+      uint64_t idx = elements[lo].indices[d];
+      uint64_t seg = lo + 1;
       while (seg < hi && elements[seg].indices[d] == idx)
         seg++;
       // Handle segment in interval for sparse or dense dimension.
@@ -505,14 +505,12 @@ static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
 
 extern "C" {
 
-/// Helper method to read a sparse tensor filename from the environment,
-/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
-char *getTensorFilename(uint64_t id) {
-  char var[80];
-  sprintf(var, "TENSOR%" PRIu64, id);
-  char *env = getenv(var);
-  return env;
-}
+/// This type is used in the public API at all places where MLIR expects
+/// values with the built-in type "index". For now, we simply assume that
+/// type is 64-bit, but targets with different "index" bit widths should link
+/// with an alternatively built runtime support library.
+// TODO: support such targets?
+typedef uint64_t index_t;
 
 //===----------------------------------------------------------------------===//
 //
@@ -525,9 +523,9 @@ char *getTensorFilename(uint64_t id) {
 //
 //===----------------------------------------------------------------------===//
 
-enum OverheadTypeEnum : uint64_t { kU64 = 1, kU32 = 2, kU16 = 3, kU8 = 4 };
+enum OverheadTypeEnum : uint32_t { kU64 = 1, kU32 = 2, kU16 = 3, kU8 = 4 };
 
-enum PrimaryTypeEnum : uint64_t {
+enum PrimaryTypeEnum : uint32_t {
   kF64 = 1,
   kF32 = 2,
   kI64 = 3,
@@ -576,7 +574,7 @@ enum Action : uint32_t {
 
 #define IMPL2(NAME, TYPE, LIB)                                                 \
   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor,      \
-                           uint64_t d) {                                       \
+                           index_t d) {                                        \
     assert(ref);                                                               \
     assert(tensor);                                                            \
     std::vector<TYPE> *v;                                                      \
@@ -589,17 +587,17 @@ enum Action : uint32_t {
 
 #define IMPL3(NAME, TYPE)                                                      \
   void *_mlir_ciface_##NAME(void *tensor, TYPE value,                          \
-                            StridedMemRefType<uint64_t, 1> *iref,              \
-                            StridedMemRefType<uint64_t, 1> *pref) {            \
+                            StridedMemRefType<index_t, 1> *iref,               \
+                            StridedMemRefType<index_t, 1> *pref) {             \
     assert(tensor);                                                            \
     assert(iref);                                                              \
     assert(pref);                                                              \
     assert(iref->strides[0] == 1 && pref->strides[0] == 1);                    \
     assert(iref->sizes[0] == pref->sizes[0]);                                  \
-    const uint64_t *indx = iref->data + iref->offset;                          \
-    const uint64_t *perm = pref->data + pref->offset;                          \
+    const index_t *indx = iref->data + iref->offset;                           \
+    const index_t *perm = pref->data + pref->offset;                           \
     uint64_t isize = iref->sizes[0];                                           \
-    std::vector<uint64_t> indices(isize);                                      \
+    std::vector<index_t> indices(isize);                                       \
     for (uint64_t r = 0; r < isize; r++)                                       \
       indices[perm[r]] = indx[r];                                              \
     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value);         \
@@ -617,17 +615,17 @@ enum Action : uint32_t {
 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO
 void *
 _mlir_ciface_newSparseTensor(StridedMemRefType<uint8_t, 1> *aref, // NOLINT
-                             StridedMemRefType<uint64_t, 1> *sref,
-                             StridedMemRefType<uint64_t, 1> *pref,
-                             uint64_t ptrTp, uint64_t indTp, uint64_t valTp,
+                             StridedMemRefType<index_t, 1> *sref,
+                             StridedMemRefType<index_t, 1> *pref,
+                             uint32_t ptrTp, uint32_t indTp, uint32_t valTp,
                              uint32_t action, void *ptr) {
   assert(aref && sref && pref);
   assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
          pref->strides[0] == 1);
   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
   const uint8_t *sparsity = aref->data + aref->offset;
-  const uint64_t *sizes = sref->data + sref->offset;
-  const uint64_t *perm = pref->data + pref->offset;
+  const index_t *sizes = sref->data + sref->offset;
+  const index_t *perm = pref->data + pref->offset;
   uint64_t rank = aref->sizes[0];
 
   // Double matrices with all combinations of overhead storage.
@@ -687,12 +685,12 @@ _mlir_ciface_newSparseTensor(StridedMemRefType<uint8_t, 1> *aref, // NOLINT
 }
 
 /// Methods that provide direct access to pointers, indices, and values.
-IMPL2(sparsePointers, uint64_t, getPointers)
+IMPL2(sparsePointers, index_t, getPointers)
 IMPL2(sparsePointers64, uint64_t, getPointers)
 IMPL2(sparsePointers32, uint32_t, getPointers)
 IMPL2(sparsePointers16, uint16_t, getPointers)
 IMPL2(sparsePointers8, uint8_t, getPointers)
-IMPL2(sparseIndices, uint64_t, getIndices)
+IMPL2(sparseIndices, index_t, getIndices)
 IMPL2(sparseIndices64, uint64_t, getIndices)
 IMPL2(sparseIndices32, uint32_t, getIndices)
 IMPL2(sparseIndices16, uint16_t, getIndices)
@@ -726,8 +724,17 @@ IMPL3(addEltI8, int8_t)
 //
 //===----------------------------------------------------------------------===//
 
+/// Helper method to read a sparse tensor filename from the environment,
+/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
+char *getTensorFilename(index_t id) {
+  char var[80];
+  sprintf(var, "TENSOR%" PRIu64, id);
+  char *env = getenv(var);
+  return env;
+}
+
 /// Returns size of sparse tensor in given dimension.
-uint64_t sparseDimSize(void *tensor, uint64_t d) {
+index_t sparseDimSize(void *tensor, index_t d) {
   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
 }
 

diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
index 577b79c6e9b0c..41dd00b80e16b 100644
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -70,11 +70,11 @@ func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index {
 // CHECK-LABEL: func @sparse_new1d(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
@@ -85,11 +85,11 @@ func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
 // CHECK-LABEL: func @sparse_new2d(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
@@ -100,11 +100,11 @@ func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
 // CHECK-LABEL: func @sparse_new3d(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
@@ -118,15 +118,13 @@ func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[II:.*]] = arith.index_cast %[[I]] : index to i64
-//   CHECK-DAG: %[[JJ:.*]] = arith.index_cast %[[J]] : index to i64
-//   CHECK-DAG: memref.store %[[II]], %[[Q]][%[[C0]]] : memref<2xi64>
-//   CHECK-DAG: memref.store %[[JJ]], %[[Q]][%[[C1]]] : memref<2xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: memref.store %[[I]], %[[Q]][%[[C0]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[J]], %[[Q]][%[[C1]]] : memref<2xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
@@ -158,11 +156,11 @@ func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32,
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[U:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xi32>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex>
@@ -182,11 +180,11 @@ func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
 // CHECK-LABEL: func @sparse_convert_1d_ss(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
@@ -200,11 +198,11 @@ func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf3
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
@@ -229,11 +227,11 @@ func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
@@ -263,11 +261,11 @@ func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix>{
 //   CHECK-DAG: %[[U2:.*]] = tensor.dim %[[A]], %[[C1]] : tensor<?x?x?xf64>
 //   CHECK-DAG: %[[U3:.*]] = tensor.dim %[[A]], %[[C2]] : tensor<?x?x?xf64>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex>


        

