[Mlir-commits] [mlir] [mlir][sparse] replace specialized buffer setup with util code (PR #68461)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Fri Oct 6 18:28:35 PDT 2023


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-mlir

<details>
<summary>Changes</summary>

This completely centralizes all setup related to dim2lvl and lvl2dim for the runtime library (and even parts of direct IR codegen) into one place! All of it is compatible with the MapRef data structure that should be used in all remaining clients of dim2lvl and lvl2dim.
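For readers unfamiliar with the two mappings, here is a minimal sketch of what dim2lvl and lvl2dim encode for a permutation-only mapping. The struct and method names below are hypothetical and purely illustrative; the actual logic lives in MapRef and the shared buffer-setup utilities, not in this snippet.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative only: for a permutation mapping, dim2lvl[d] is the level that
// stores dimension d, and lvl2dim[l] is the dimension stored at level l.
struct PermutationMap {
  std::vector<uint64_t> dim2lvl; // dimension -> level
  std::vector<uint64_t> lvl2dim; // level -> dimension

  // Translate dimension coordinates into level coordinates.
  std::vector<uint64_t> dimToLvl(const std::vector<uint64_t> &dimCrds) const {
    std::vector<uint64_t> lvlCrds(lvl2dim.size());
    for (uint64_t l = 0, e = lvl2dim.size(); l < e; ++l)
      lvlCrds[l] = dimCrds[lvl2dim[l]];
    return lvlCrds;
  }
};

int main() {
  // Example: a 2-d tensor stored "column-major", i.e. (d0, d1) -> (d1, d0).
  PermutationMap map{{1, 0}, {1, 0}};
  auto lvlCrds = map.dimToLvl({3, 7});
  assert(lvlCrds[0] == 7 && lvlCrds[1] == 3);
  return 0;
}
```

With this patch, the level sizes and both mapping buffers are produced by one shared utility (see the `genReaderBuffers` call in the diff below), so direct IR codegen and the runtime library agree on a single encoding.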

NOTE: the convert_x2y.mlir tests were becoming too overloaded,
      so I decided to bring them back to the basics; if, e.g.,
      more coverage of the foreach operation is required, it
      should go into isolated, smaller tests.

---

Patch is 140.46 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/68461.diff


8 Files Affected:

- (modified) mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h (+3-2) 
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp (+25-68) 
- (modified) mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp (+8-4) 
- (modified) mlir/test/Dialect/SparseTensor/conversion.mlir (+5-7) 
- (modified) mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir (+286-193) 
- (modified) mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir (+290-325) 
- (modified) mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir (+137-130) 
- (modified) mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir (+66-70) 


``````````diff
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index f25df11d15fdad1..e723a354345849d 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -57,8 +57,8 @@ MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_newSparseTensor( // NOLINT
     StridedMemRefType<index_type, 1> *dimSizesRef,
     StridedMemRefType<index_type, 1> *lvlSizesRef,
     StridedMemRefType<DimLevelType, 1> *lvlTypesRef,
-    StridedMemRefType<index_type, 1> *lvl2dimRef,
-    StridedMemRefType<index_type, 1> *dim2lvlRef, OverheadType posTp,
+    StridedMemRefType<index_type, 1> *dim2lvlRef,
+    StridedMemRefType<index_type, 1> *lvl2dimRef, OverheadType posTp,
     OverheadType crdTp, PrimaryType valTp, Action action, void *ptr);
 
 /// Tensor-storage method to obtain direct access to the values array.
@@ -85,6 +85,7 @@ MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSECOORDINATES)
 #undef DECL_SPARSECOORDINATES
 
 /// Coordinate-scheme method for adding a new element.
+/// TODO: remove dim2lvl
 #define DECL_ADDELT(VNAME, V)                                                  \
   MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_addElt##VNAME(                   \
       void *lvlCOO, StridedMemRefType<V, 0> *vref,                             \
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index eb0c5160e8d6193..bb92029ff1e924d 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -187,25 +187,38 @@ static Value genLvlPtrsBuffers(OpBuilder &builder, Location loc,
 
 /// This class abstracts over the API of `_mlir_ciface_newSparseTensor`:
 /// the "swiss army knife" method of the sparse runtime support library
-/// for materializing sparse tensors into the computation.  This abstraction
-/// reduces the need to make modifications to client code whenever that
-/// API changes.
+/// for materializing sparse tensors into the computation. This abstraction
+/// reduces the need for modifications when the API changes.
 class NewCallParams final {
 public:
-  /// Allocates the `ValueRange` for the `func::CallOp` parameters,
-  /// but does not initialize them.
+  /// Allocates the `ValueRange` for the `func::CallOp` parameters.
   NewCallParams(OpBuilder &builder, Location loc)
       : builder(builder), loc(loc), pTp(getOpaquePointerType(builder)) {}
 
   /// Initializes all static parameters (i.e., those which indicate
   /// type-level information such as the encoding and sizes), generating
   /// MLIR buffers as needed, and returning `this` for method chaining.
-  /// This method does not set the action and pointer arguments, since
-  /// those are handled by `genNewCall` instead.
-  NewCallParams &genBuffers(SparseTensorType stt, ValueRange dimSizes);
+  NewCallParams &genBuffers(SparseTensorType stt,
+                            ArrayRef<Value> dimSizesValues) {
+    const Dimension dimRank = stt.getDimRank();
+    assert(dimSizesValues.size() == static_cast<size_t>(dimRank));
+    // Sparsity annotations.
+    params[kParamLvlTypes] = genLvlTypesBuffer(builder, loc, stt);
+    // Construct dimSizes, lvlSizes, dim2lvl, and lvl2dim buffers.
+    params[kParamDimSizes] = allocaBuffer(builder, loc, dimSizesValues);
+    params[kParamLvlSizes] = genReaderBuffers(
+        builder, loc, stt, dimSizesValues, params[kParamDimSizes],
+        params[kParamDim2Lvl], params[kParamLvl2Dim]);
+    // Secondary and primary types encoding.
+    setTemplateTypes(stt);
+    // Finally, make note that initialization is complete.
+    assert(isInitialized() && "Initialization failed");
+    // And return `this` for method chaining.
+    return *this;
+  }
 
   /// (Re)sets the C++ template type parameters, and returns `this`
-  /// for method chaining.  This is already done as part of `genBuffers`,
+  /// for method chaining. This is already done as part of `genBuffers`,
   /// but is factored out so that it can also be called independently
   /// whenever subsequent `genNewCall` calls want to reuse the same
   /// buffers but different type parameters.
@@ -236,7 +249,7 @@ class NewCallParams final {
   // this one-off getter, and to avoid potential mixups)?
   Value getDimToLvl() const {
     assert(isInitialized() && "Must initialize before getDimToLvl");
-    return params[kParamDimToLvl];
+    return params[kParamDim2Lvl];
   }
 
   /// Generates a function call, with the current static parameters
@@ -257,8 +270,8 @@ class NewCallParams final {
   static constexpr unsigned kParamDimSizes = 0;
   static constexpr unsigned kParamLvlSizes = 1;
   static constexpr unsigned kParamLvlTypes = 2;
-  static constexpr unsigned kParamLvlToDim = 3;
-  static constexpr unsigned kParamDimToLvl = 4;
+  static constexpr unsigned kParamDim2Lvl = 3;
+  static constexpr unsigned kParamLvl2Dim = 4;
   static constexpr unsigned kParamPosTp = 5;
   static constexpr unsigned kParamCrdTp = 6;
   static constexpr unsigned kParamValTp = 7;
@@ -271,62 +284,6 @@ class NewCallParams final {
   Value params[kNumParams];
 };
 
-// TODO: see the note at `_mlir_ciface_newSparseTensor` about how
-// the meaning of the various arguments (e.g., "sizes" vs "shapes")
-// is inconsistent between the different actions.
-NewCallParams &NewCallParams::genBuffers(SparseTensorType stt,
-                                         ValueRange dimSizes) {
-  const Level lvlRank = stt.getLvlRank();
-  const Dimension dimRank = stt.getDimRank();
-  // Sparsity annotations.
-  params[kParamLvlTypes] = genLvlTypesBuffer(builder, loc, stt);
-  // Dimension-sizes array of the enveloping tensor.  Useful for either
-  // verification of external data, or for construction of internal data.
-  assert(dimSizes.size() == static_cast<size_t>(dimRank) &&
-         "Dimension-rank mismatch");
-  params[kParamDimSizes] = allocaBuffer(builder, loc, dimSizes);
-  // The level-sizes array must be passed as well, since for arbitrary
-  // dimToLvl mappings it cannot be trivially reconstructed at runtime.
-  // For now however, since we're still assuming permutations, we will
-  // initialize this parameter alongside the `dimToLvl` and `lvlToDim`
-  // parameters below.  We preinitialize `lvlSizes` for code symmetry.
-  SmallVector<Value> lvlSizes(lvlRank);
-  // The dimension-to-level mapping and its inverse.  We must preinitialize
-  // `dimToLvl` so that the true branch below can perform random-access
-  // `operator[]` assignment.  We preinitialize `lvlToDim` for code symmetry.
-  SmallVector<Value> dimToLvl(dimRank);
-  SmallVector<Value> lvlToDim(lvlRank);
-  if (!stt.isIdentity()) {
-    const auto dimToLvlMap = stt.getDimToLvl();
-    assert(dimToLvlMap.isPermutation());
-    for (Level l = 0; l < lvlRank; l++) {
-      // The `d`th source variable occurs in the `l`th result position.
-      const Dimension d = dimToLvlMap.getDimPosition(l);
-      dimToLvl[d] = constantIndex(builder, loc, l);
-      lvlToDim[l] = constantIndex(builder, loc, d);
-      lvlSizes[l] = dimSizes[d];
-    }
-  } else {
-    // The `SparseTensorType` ctor already ensures `dimRank == lvlRank`
-    // when `isIdentity`; so no need to re-assert it here.
-    for (Level l = 0; l < lvlRank; l++) {
-      dimToLvl[l] = lvlToDim[l] = constantIndex(builder, loc, l);
-      lvlSizes[l] = dimSizes[l];
-    }
-  }
-  params[kParamLvlSizes] = allocaBuffer(builder, loc, lvlSizes);
-  params[kParamLvlToDim] = allocaBuffer(builder, loc, lvlToDim);
-  params[kParamDimToLvl] = stt.isIdentity()
-                               ? params[kParamLvlToDim]
-                               : allocaBuffer(builder, loc, dimToLvl);
-  // Secondary and primary types encoding.
-  setTemplateTypes(stt);
-  // Finally, make note that initialization is complete.
-  assert(isInitialized() && "Initialization failed");
-  // And return `this` for method chaining.
-  return *this;
-}
-
 /// Generates a call to obtain the values array.
 static Value genValuesCall(OpBuilder &builder, Location loc, ShapedType tp,
                            ValueRange ptr) {
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index 5b910716c0f9e59..05da8cd79190ed0 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -231,8 +231,8 @@ void *_mlir_ciface_newSparseTensor( // NOLINT
     StridedMemRefType<index_type, 1> *dimSizesRef,
     StridedMemRefType<index_type, 1> *lvlSizesRef,
     StridedMemRefType<DimLevelType, 1> *lvlTypesRef,
-    StridedMemRefType<index_type, 1> *lvl2dimRef,
-    StridedMemRefType<index_type, 1> *dim2lvlRef, OverheadType posTp,
+    StridedMemRefType<index_type, 1> *dim2lvlRef,
+    StridedMemRefType<index_type, 1> *lvl2dimRef, OverheadType posTp,
     OverheadType crdTp, PrimaryType valTp, Action action, void *ptr) {
   ASSERT_NO_STRIDE(dimSizesRef);
   ASSERT_NO_STRIDE(lvlSizesRef);
@@ -250,6 +250,9 @@ void *_mlir_ciface_newSparseTensor( // NOLINT
   const index_type *dim2lvl = MEMREF_GET_PAYLOAD(dim2lvlRef);
   const index_type *lvl2dim = MEMREF_GET_PAYLOAD(lvl2dimRef);
 
+  // Prepare map.
+  // TODO: start using MapRef map(dimRank, lvlRank, dim2lvl, lvl2dim) below
+
   // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases.
   // This is safe because of the static_assert above.
   if (posTp == OverheadType::kIndex)
@@ -400,6 +403,7 @@ MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSECOORDINATES)
 #undef IMPL_GETOVERHEAD
 
 // TODO: use MapRef here for translation of coordinates
+// TOOD: remove dim2lvl
 #define IMPL_ADDELT(VNAME, V)                                                  \
   void *_mlir_ciface_addElt##VNAME(                                            \
       void *lvlCOO, StridedMemRefType<V, 0> *vref,                             \
@@ -540,13 +544,13 @@ void *_mlir_ciface_newSparseTensorFromReader(
   SparseTensorReader &reader = *static_cast<SparseTensorReader *>(p);
   ASSERT_NO_STRIDE(lvlSizesRef);
   ASSERT_NO_STRIDE(lvlTypesRef);
-  ASSERT_NO_STRIDE(lvl2dimRef);
   ASSERT_NO_STRIDE(dim2lvlRef);
+  ASSERT_NO_STRIDE(lvl2dimRef);
   const uint64_t dimRank = reader.getRank();
   const uint64_t lvlRank = MEMREF_GET_USIZE(lvlSizesRef);
   ASSERT_USIZE_EQ(lvlTypesRef, lvlRank);
-  ASSERT_USIZE_EQ(lvl2dimRef, lvlRank);
   ASSERT_USIZE_EQ(dim2lvlRef, dimRank);
+  ASSERT_USIZE_EQ(lvl2dimRef, lvlRank);
   (void)dimRank;
   const index_type *lvlSizes = MEMREF_GET_PAYLOAD(lvlSizesRef);
   const DimLevelType *lvlTypes = MEMREF_GET_PAYLOAD(lvlTypesRef);
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
index 138736e26c1dfdd..29093a055ab2e04 100644
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -136,18 +136,16 @@ func.func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor
 //   CHECK-DAG: %[[Empty:.*]] = arith.constant 0 : i32
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-//   CHECK-DAG: %[[DimSizes0:.*]] = memref.alloca() : memref<2xindex>
-//   CHECK-DAG: %[[LvlSizes0:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[LvlTypes0:.*]] = memref.alloca() : memref<2xi8>
+//   CHECK-DAG: %[[Sizes0:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[Iota0:.*]] = memref.alloca() : memref<2xindex>
-//   CHECK-DAG: %[[DimSizes:.*]] = memref.cast %[[DimSizes0]] : memref<2xindex> to memref<?xindex>
-//   CHECK-DAG: %[[LvlSizes:.*]] = memref.cast %[[LvlSizes0]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: %[[LvlTypes:.*]] = memref.cast %[[LvlTypes0]] : memref<2xi8> to memref<?xi8>
+//   CHECK-DAG: %[[Sizes:.*]] = memref.cast %[[Sizes0]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Iota:.*]] = memref.cast %[[Iota0]] : memref<2xindex> to memref<?xindex>
-//   CHECK-DAG: memref.store %[[I]], %[[DimSizes0]][%[[C0]]] : memref<2xindex>
-//   CHECK-DAG: memref.store %[[J]], %[[DimSizes0]][%[[C1]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[I]], %[[Sizes0]][%[[C0]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[J]], %[[Sizes0]][%[[C1]]] : memref<2xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[DimSizes]], %[[LvlSizes]], %[[LvlTypes]], %[[Iota]], %[[Iota]], %{{.*}}, %{{.*}}, %{{.*}}, %[[Empty]], %[[NP]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[Sizes]], %[[Sizes]], %[[LvlTypes]], %[[Iota]], %[[Iota]], %{{.*}}, %{{.*}}, %{{.*}}, %[[Empty]], %[[NP]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func.func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #CSR> {
   %0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf64, #CSR>
diff --git a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
index b11da60cd48308b..1a69c80f7ecadfd 100644
--- a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
@@ -1,6 +1,4 @@
 // RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s
-// RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=false enable-foreach=false" \
-// RUN: --canonicalize --cse | FileCheck %s --check-prefix=CHECK-RWT
 
 #SparseVector = #sparse_tensor.encoding<{
   map = (d0) -> (d0 : compressed)
@@ -18,161 +16,187 @@
   map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed)
 }>
 
-// CHECK-LABEL: func @sparse_convert_1d(
-//  CHECK-SAME: %[[A:.*]]: tensor<?xi32>) -> !llvm.ptr<i8> {
-//   CHECK-DAG: %[[EmptyCOO:.*]] = arith.constant 4 : i32
-//   CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
-//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : i32
-//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-//   CHECK-DAG: %[[U:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xi32>
-//   CHECK-DAG: %[[LvlTypes:.*]] = memref.alloca() : memref<1xi8>
-//   CHECK-DAG: %[[DimSizes:.*]] = memref.alloca() : memref<1xindex>
-//   CHECK-DAG: %[[LvlSizes:.*]] = memref.alloca() : memref<1xindex>
-//   CHECK-DAG: %[[Iota:.*]] = memref.alloca() : memref<1xindex>
-//   CHECK-DAG: %[[LvlTypesP:.*]] = memref.cast %[[LvlTypes]] : memref<1xi8> to memref<?xi8>
-//   CHECK-DAG: %[[DimSizesP:.*]] = memref.cast %[[DimSizes]] : memref<1xindex> to memref<?xindex>
-//   CHECK-DAG: %[[LvlSizesP:.*]] = memref.cast %[[LvlSizes]] : memref<1xindex> to memref<?xindex>
-//   CHECK-DAG: %[[IotaP:.*]] = memref.cast %[[Iota]] : memref<1xindex> to memref<?xindex>
-//       CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
-//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
-//       CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex>
-//       CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<1xindex> to memref<?xindex>
-//       CHECK: %[[BUF:.*]] = memref.alloca() : memref<i32>
-//       CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U]] step %[[C1]] {
-//       CHECK:   %[[E:.*]] = tensor.extract %[[A]][%[[I]]] : tensor<?xi32>
-//       CHECK:   %[[N:.*]] = arith.cmpi ne, %[[E]], %[[I0]] : i32
-//       CHECK:   scf.if %[[N]] {
-//       CHECK:     memref.store %[[I]], %[[M]][%[[C0]]] : memref<1xindex>
-//       CHECK:     memref.store %[[E]], %[[BUF]][] : memref<i32>
-//       CHECK:     call @addEltI32(%[[C]], %[[BUF]], %[[T]], %[[IotaP]])
-//       CHECK:   }
-//       CHECK: }
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
-//       CHECK: call @delSparseTensorCOOI32(%[[C]])
-//       CHECK: return %[[T]] : !llvm.ptr<i8>
+// CHECK-LABEL:   func.func @sparse_convert_1d(
+// CHECK-SAME:      %[[VAL_0:.*]]: tensor<?xi32>) -> !llvm.ptr<i8> {
+// CHECK-DAG:       %[[VAL_1:.*]] = arith.constant 2 : i32
+// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 4 : i32
+// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 6 : i32
+// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 0 : i32
+// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 8 : i8
+// CHECK-DAG:       %[[VAL_7:.*]] = arith.constant 0 : index
+// CHECK:           %[[VAL_8:.*]] = tensor.dim %[[VAL_0]], %[[VAL_7]] : tensor<?xi32>
+// CHECK:           %[[VAL_9:.*]] = memref.alloca() : memref<1xi8>
+// CHECK:           %[[VAL_10:.*]] = memref.cast %[[VAL_9]] : memref<1xi8> to memref<?xi8>
+// CHECK:           memref.store %[[VAL_6]], %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<1xi8>
+// CHECK:           %[[VAL_11:.*]] = memref.alloca() : memref<1xindex>
+// CHECK:           %[[VAL_12:.*]] = memref.cast %[[VAL_11]] : memref<1xindex> to memref<?xindex>
+// CHECK:           memref.store %[[VAL_8]], %[[VAL_11]]{{\[}}%[[VAL_7]]] : memref<1xindex>
+// CHECK:           %[[VAL_13:.*]] = memref.alloca() : memref<1xindex>
+// CHECK:           %[[VAL_14:.*]] = memref.cast %[[VAL_13]] : memref<1xindex> to memref<?xindex>
+// CHECK:           memref.store %[[VAL_7]], %[[VAL_13]]{{\[}}%[[VAL_7]]] : memref<1xindex>
+// CHECK:           %[[VAL_15:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
+// CHECK:           %[[VAL_16:.*]] = call @newSparseTensor(%[[VAL_12]], %[[VAL_12]], %[[VAL_10]], %[[VAL_14]], %[[VAL_14]], %[[VAL_4]], %[[VAL_4]], %[[VAL_3]], %[[VAL_2]], %[[VAL_15]]) : (memref<?xindex>, memref<?xindex>, memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK:           %[[VAL_17:.*]] = memref.alloca() : memref<1xindex>
+// CHECK:           %[[VAL_18:.*]] = memref.cast %[[VAL_17]] : memref<1xindex> to memref<?xindex>
+// CHECK:           %[[VAL_19:.*]] = memref.alloca() : memref<i32>
+// CHECK:           scf.for %[[VAL_20:.*]] = %[[VAL_7]] to %[[VAL_8]] step %[[VAL_5]] {
+// CHECK:             %[[VAL_21:.*]] = tensor.extract %[[VAL_0]]{{\[}}%[[VAL_20]]] : tensor<?xi32>
+// CHECK:             %[[VAL_22:.*]] = arith.cmpi ne, %[[VAL_21]], %[[VAL_4]] : i32
+// CHECK:             scf.if %[[VAL_22]] {
+// CHECK:               memref.store %[[VAL_20]], %[[VAL_17]]{{\[}}%[[VAL_7]]] : memref<1xindex>
+// CHECK:               memref.store %[[VAL_21]], %[[VAL_19]][] : memref<i32>
+// CHECK:               %[[VAL_23:.*]] = func.call @addEltI32(%[[VAL_16]], %[[VAL_19]], %[[VAL_18]], %[[VAL_14]]) : (!llvm.ptr<i8>, memref<i32>, memref<?xindex>, memref<?xindex>) -> !llvm.ptr<i8>
+// CHECK:             }
+// CHECK:           }
+// CHECK:           %[[VAL_24:.*]] = call @newSparseTensor(%[[VAL_12]], %[[VAL_12]], %[[VAL_10]], %[[VAL_14]], %[[VAL_14]], %[[VAL_4]], %[[VAL_4]], %[[VAL_3]], %[[VAL_1]], %[[VAL_16]]) : (memref<?xindex>, memref<?xindex>, memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK:           call @delSparseTensorCOOI32(%[[VAL_16]]) : (!llvm.ptr<i8>) -> ()
+// CHECK:           return %[[VAL_24]] : !llvm.ptr<i8>
+// CHECK:         }
 func.func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<?xi32> to tensor<?xi32, #SparseVector>
   return %0 : tensor<?xi32, #SparseVector>
 }
 
-// CHECK-LABEL: func @sparse_convert_complex(
-//  CHECK-SAME: %[[A:.*]]: tensor<100xcomplex<f64>>) -> !llvm.ptr<i8> {
-//   CHECK-DAG: %[[CC:.*]] = complex.constant [0.000000e+00, 0.000000e+00] : complex<f64>
-//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-//   CHECK-DAG: %[[C100:.*]] = arith.constant 100 : index
-//       CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C100]] step %[[C1]] {
-//       CHECK:   %[[E:.*]] = tensor.extract %[[A]][%[[I]]] : tensor<100xcomplex<f64>>
-//       CHECK:   %[[N:.*]] = complex.neq %[[E]], %[[CC]] : complex<f64>
-//       CHECK:   scf.if %[[N]] {
-//       CHECK:     memref.store %[[I]], %{{.*}}[%[[C0]]] : memref<1xindex>
-//       CHECK:     call @addEltC64
-//       CHECK:   }
-//       CHECK: }
-//       CHECK: %[[T:.*]] = call @newSparseTensor
-//       CHECK: call @delSparseTensorC...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/68461

