[Mlir-commits] [mlir] b86d3cb - [mlir][sparse] complete various FIXMEs in sparse support lib

Aart Bik llvmlistbot at llvm.org
Wed Aug 30 21:30:36 PDT 2023


Author: Aart Bik
Date: 2023-08-30T21:30:25-07:00
New Revision: b86d3cbc1288b4d0799746c82611d17888413aec

URL: https://github.com/llvm/llvm-project/commit/b86d3cbc1288b4d0799746c82611d17888413aec
DIFF: https://github.com/llvm/llvm-project/commit/b86d3cbc1288b4d0799746c82611d17888413aec.diff

LOG: [mlir][sparse] complete various FIXMEs in sparse support lib

Switches `SparseTensorCodegenPass` from the unchecked `createSparseTensorReader`
entry point to the validating `createCheckedSparseTensorReader`, reads
dimension sizes directly through `getSparseTensorReaderDimSizes` instead of
copying them with `copySparseTensorReaderDimSizes`, and renames the bulk-read
entry point to `getSparseTensorReaderReadToBuffers`. The now-unused unchecked
reader API is deleted from the runtime, together with the `sparse_file_io.mlir`
integration test that exercised it.

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D159245

Added: 
    

Modified: 
    mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
    mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
    mlir/test/Dialect/SparseTensor/codegen.mlir

Removed: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir


################################################################################
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index ac466779e9e2ae..fc0d4a27b0787b 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -125,9 +125,6 @@ MLIR_SPARSETENSOR_FOREVERY_V(DECL_EXPINSERT)
 /// Constructs a new SparseTensorReader object, opens the file, reads the
 /// header, and validates that the actual contents of the file match
 /// the expected `dimShapeRef` and `valTp`.
-//
-// FIXME: update `SparseTensorCodegenPass` to use
-// `_mlir_ciface_createCheckedSparseTensorReader` instead.
 MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_createCheckedSparseTensorReader(
     char *filename, StridedMemRefType<index_type, 1> *dimShapeRef,
     PrimaryType valTp);
@@ -142,14 +139,6 @@ MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_newSparseTensorFromReader(
     StridedMemRefType<index_type, 1> *dim2lvlRef, OverheadType posTp,
     OverheadType crdTp, PrimaryType valTp);
 
-/// SparseTensorReader method to copy the dimension-sizes into the
-/// provided memref.
-//
-// FIXME: update `SparseTensorCodegenPass` to use
-// `_mlir_ciface_getSparseTensorReaderDimSizes` instead.
-MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_copySparseTensorReaderDimSizes(
-    void *p, StridedMemRefType<index_type, 1> *dref);
-
 /// SparseTensorReader method to obtain direct access to the
 /// dimension-sizes array.
 MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_getSparseTensorReaderDimSizes(
@@ -168,7 +157,7 @@ MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETNEXT)
 /// sorted.
 #define DECL_GETNEXT(VNAME, V, CNAME, C)                                       \
   MLIR_CRUNNERUTILS_EXPORT bool                                                \
-      _mlir_ciface_getSparseTensorReaderRead##CNAME##VNAME(                    \
+      _mlir_ciface_getSparseTensorReaderReadToBuffers##CNAME##VNAME(           \
           void *p, StridedMemRefType<index_type, 1> *dim2lvlRef,               \
           StridedMemRefType<C, 1> *iref, StridedMemRefType<V, 1> *vref)        \
           MLIR_SPARSETENSOR_FOREVERY_V_O(DECL_GETNEXT)
@@ -286,11 +275,6 @@ MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTTOMLIRSPARSETENSOR)
 MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR)
 #undef DECL_CONVERTFROMMLIRSPARSETENSOR
 
-/// Creates a SparseTensorReader for reading a sparse tensor from a file with
-/// the given file name. This opens the file and read the header meta data based
-/// of the sparse tensor format derived from the suffix of the file name.
-MLIR_CRUNNERUTILS_EXPORT void *createSparseTensorReader(char *filename);
-
 /// Returns the rank of the sparse tensor being read.
 MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderRank(void *p);
 

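For context (not part of the commit): a minimal C++ sketch of how a
support-library client could drive the checked-reader API declared above.
The driver function and the rank-2 f32 setup are assumptions for
illustration; a dim-shape entry of 0 encodes a dynamic size, i.e. "accept
whatever extent the file header declares", which matches how the codegen
below builds the buffer.

    #include "mlir/ExecutionEngine/SparseTensorRuntime.h"

    using namespace mlir::sparse_tensor;

    // Hypothetical driver: open and validate a 2-D f32 tensor file.
    void openExample(char *filename) {
      index_type dimShape[2] = {0, 0}; // both dimensions dynamic
      StridedMemRefType<index_type, 1> dimShapeRef{dimShape, dimShape, 0,
                                                   {2}, {1}};
      // Opens the file, reads the header, and validates shape and value type.
      void *reader = _mlir_ciface_createCheckedSparseTensorReader(
          filename, &dimShapeRef, PrimaryType::kF32);

      // Direct access to the reader's dimension-sizes array (no copy).
      StridedMemRefType<index_type, 1> dimSizesRef;
      _mlir_ciface_getSparseTensorReaderDimSizes(&dimSizesRef, reader);
      const index_type rows = dimSizesRef.data[0];
      const index_type cols = dimSizesRef.data[1];
      (void)rows;
      (void)cols;

      delSparseTensorReader(reader);
    }
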
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 9d833ec0c35b38..20df5f43e897ec 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1372,7 +1372,7 @@ struct SparseNewOpConverter : public OpConversionPattern<NewOp> {
       return failure();
 
     // Implement the NewOp(filename) as follows:
-    //   %reader = @getSparseTensorReader(%filename)
+    //   %reader = @createCheckedSparseTensorReader(%filename)
     //   %nse = @getSparseTensorNSE(%reader)
     //   %coo = bufferization.alloc_tensor an ordered COO with
     //          dst dim ordering, size_hint = %nse
@@ -1383,15 +1383,23 @@ struct SparseNewOpConverter : public OpConversionPattern<NewOp> {
     //   update storage specifier
     //   @delSparseTensorReader(%reader)
 
-    // Create a sparse tensor reader.
-    const Value fileName = op.getSource();
+    // Allocate `SparseTensorReader` and perform all initial setup that
+    // does not depend on lvlSizes (nor dimToLvl, lvlToDim, etc).
     const Type opaqueTp = getOpaquePointerType(rewriter);
-    // FIXME: use `createCheckedSparseTensorReader` instead, because
-    // `createSparseTensorReader` is unsafe.
-    Value reader = createFuncCall(rewriter, loc, "createSparseTensorReader",
-                                  {opaqueTp}, {fileName}, EmitCInterface::Off)
-                       .getResult(0);
-
+    const Value fileName = op.getSource();
+    SmallVector<Value> dimShapeValues;
+    for (const DynSize sh : dstTp.getDimShape()) {
+      const auto s = ShapedType::isDynamic(sh) ? 0 : sh;
+      dimShapeValues.push_back(constantIndex(rewriter, loc, s));
+    }
+    Value dimShapeBuffer = allocaBuffer(rewriter, loc, dimShapeValues);
+    Value valTp =
+        constantPrimaryTypeEncoding(rewriter, loc, dstTp.getElementType());
+    Value reader =
+        createFuncCall(rewriter, loc, "createCheckedSparseTensorReader",
+                       opaqueTp, {fileName, dimShapeBuffer, valTp},
+                       EmitCInterface::On)
+            .getResult(0);
     const Type indexTp = rewriter.getIndexType();
     const Dimension dimRank = dstTp.getDimRank();
     const Level lvlRank = dstTp.getLvlRank();
@@ -1400,18 +1408,18 @@ struct SparseNewOpConverter : public OpConversionPattern<NewOp> {
     // the sparse tensor reader.
     SmallVector<Value> dynSizes;
     if (dstTp.hasDynamicDimShape()) {
-      // FIXME: call `getSparseTensorReaderDimSizes` instead, because
-      // `copySparseTensorReaderDimSizes` copies the memref over,
-      // instead of just accessing the reader's memory directly.
-      Value dimSizes = genAlloca(rewriter, loc, dimRank, indexTp);
-      createFuncCall(rewriter, loc, "copySparseTensorReaderDimSizes", {},
-                     {reader, dimSizes}, EmitCInterface::On);
+      auto memTp = MemRefType::get({ShapedType::kDynamic}, indexTp);
+      Value dimSizesBuffer =
+          createFuncCall(rewriter, loc, "getSparseTensorReaderDimSizes", memTp,
+                         reader, EmitCInterface::On)
+              .getResult(0);
       for (const auto &d : llvm::enumerate(dstTp.getDimShape()))
         if (ShapedType::isDynamic(d.value()))
           dynSizes.push_back(rewriter.create<memref::LoadOp>(
-              loc, dimSizes, constantIndex(rewriter, loc, d.index())));
+              loc, dimSizesBuffer, constantIndex(rewriter, loc, d.index())));
     }
 
+    // Get the number of stored entries.
     Value nse = createFuncCall(rewriter, loc, "getSparseTensorReaderNSE",
                                {indexTp}, {reader}, EmitCInterface::Off)
                     .getResult(0);
@@ -1422,10 +1430,6 @@ struct SparseNewOpConverter : public OpConversionPattern<NewOp> {
     MutSparseTensorDescriptor desc(dstTp, fields);
 
     // Construct the `dimToLvl` buffer for handing off to the runtime library.
-    // FIXME: This code is (mostly) copied from the SparseTensorConversion.cpp
-    // handling of `NewOp`, and only handles permutations.  Fixing this
-    // requires waiting for wrengr to finish redoing the CL that handles
-    // all dim<->lvl stuff more robustly.
     SmallVector<Value> dimToLvlValues(dimRank);
     if (!dstTp.isIdentity()) {
       const auto dimToLvl = dstTp.getDimToLvl();
@@ -1449,9 +1453,7 @@ struct SparseNewOpConverter : public OpConversionPattern<NewOp> {
     const Type boolTp = rewriter.getIntegerType(1);
     const Type elemTp = dstTp.getElementType();
     const Type crdTp = dstTp.getCrdType();
-    // FIXME: This function name is weird; should rename to
-    // "sparseTensorReaderReadToBuffers".
-    SmallString<32> readToBuffersFuncName{"getSparseTensorReaderRead",
+    SmallString<32> readToBuffersFuncName{"getSparseTensorReaderReadToBuffers",
                                           overheadTypeFunctionSuffix(crdTp),
                                           primaryTypeFunctionSuffix(elemTp)};
     Value isSorted =

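A side note on the dim-shape encoding introduced in this file (not part of
the commit): a static extent is passed through verbatim, while a dynamic
extent becomes 0, which the checked reader treats as "match any size". A
stand-alone C++ restatement of that mapping, with a made-up helper name:

    #include <cstdint>
    #include <vector>

    // Mirrors the dimShapeValues loop in the patch: the dynamic-size
    // sentinel becomes 0, static sizes pass through unchanged.
    std::vector<uint64_t> encodeDimShape(const std::vector<int64_t> &shape,
                                         int64_t dynamicSentinel) {
      std::vector<uint64_t> encoded;
      encoded.reserve(shape.size());
      for (int64_t sh : shape)
        encoded.push_back(sh == dynamicSentinel ? 0 : uint64_t(sh));
      return encoded;
    }
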
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index ff6d4a483a079b..ef68ae293e61cf 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -605,21 +605,6 @@ void *_mlir_ciface_createCheckedSparseTensorReader(
   return static_cast<void *>(reader);
 }
 
-// FIXME: update `SparseTensorCodegenPass` to use
-// `_mlir_ciface_getSparseTensorReaderDimSizes` instead.
-void _mlir_ciface_copySparseTensorReaderDimSizes(
-    void *p, StridedMemRefType<index_type, 1> *dimSizesRef) {
-  assert(p);
-  SparseTensorReader &reader = *static_cast<SparseTensorReader *>(p);
-  ASSERT_NO_STRIDE(dimSizesRef);
-  const uint64_t dimRank = MEMREF_GET_USIZE(dimSizesRef);
-  ASSERT_USIZE_EQ(dimSizesRef, reader.getRank());
-  index_type *dimSizes = MEMREF_GET_PAYLOAD(dimSizesRef);
-  const index_type *fileSizes = reader.getDimSizes();
-  for (uint64_t d = 0; d < dimRank; ++d)
-    dimSizes[d] = fileSizes[d];
-}
-
 void _mlir_ciface_getSparseTensorReaderDimSizes(
     StridedMemRefType<index_type, 1> *out, void *p) {
   assert(out && p);
@@ -643,10 +628,8 @@ void _mlir_ciface_getSparseTensorReaderDimSizes(
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
 #undef IMPL_GETNEXT
 
-// FIXME: This function name is weird; should rename to
-// "sparseTensorReaderReadToBuffers".
 #define IMPL_GETNEXT(VNAME, V, CNAME, C)                                       \
-  bool _mlir_ciface_getSparseTensorReaderRead##CNAME##VNAME(                   \
+  bool _mlir_ciface_getSparseTensorReaderReadToBuffers##CNAME##VNAME(          \
       void *p, StridedMemRefType<index_type, 1> *dim2lvlRef,                   \
       StridedMemRefType<C, 1> *cref, StridedMemRefType<V, 1> *vref) {          \
     assert(p);                                                                 \
@@ -694,9 +677,6 @@ void *_mlir_ciface_newSparseTensorFromReader(
   const DimLevelType *lvlTypes = MEMREF_GET_PAYLOAD(lvlTypesRef);
   const index_type *lvl2dim = MEMREF_GET_PAYLOAD(lvl2dimRef);
   const index_type *dim2lvl = MEMREF_GET_PAYLOAD(dim2lvlRef);
-  //
-  // FIXME(wrengr): Really need to define a separate x-macro for handling
-  // all this. (Or ideally some better, entirely-different approach)
 #define CASE(p, c, v, P, C, V)                                                 \
   if (posTp == OverheadType::p && crdTp == OverheadType::c &&                  \
       valTp == PrimaryType::v)                                                 \
@@ -907,15 +887,6 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR)
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR)
 #undef IMPL_CONVERTFROMMLIRSPARSETENSOR
 
-// FIXME: update `SparseTensorCodegenPass` to use
-// `_mlir_ciface_createCheckedSparseTensorReader` instead.
-void *createSparseTensorReader(char *filename) {
-  SparseTensorReader *reader = new SparseTensorReader(filename);
-  reader->openFile();
-  reader->readHeader();
-  return static_cast<void *>(reader);
-}
-
 index_type getSparseTensorReaderRank(void *p) {
   return static_cast<SparseTensorReader *>(p)->getRank();
 }

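Again for context (not part of the commit): a hedged C++ sketch of the
renamed bulk-read entry point, continuing the reader sketch above. The
"0F32" suffix selects index_type coordinates and f32 values; the rank-2
identity dim2lvl map and the helper name are illustrative assumptions.

    #include "mlir/ExecutionEngine/SparseTensorRuntime.h"

    #include <vector>

    using namespace mlir::sparse_tensor;

    // Hypothetical helper: read all entries of an already-opened reader
    // into flat COO coordinate/value buffers.
    bool readAllEntries(void *reader, std::vector<index_type> &coords,
                        std::vector<float> &values) {
      const index_type rank = getSparseTensorReaderRank(reader); // assume 2
      const index_type nse = getSparseTensorReaderNSE(reader);
      index_type dim2lvl[2] = {0, 1}; // identity dim->lvl permutation
      coords.resize(rank * nse);      // nse coordinate tuples, flattened
      values.resize(nse);

      StridedMemRefType<index_type, 1> dim2lvlRef{
          dim2lvl, dim2lvl, 0, {int64_t(rank)}, {1}};
      StridedMemRefType<index_type, 1> cref{
          coords.data(), coords.data(), 0, {int64_t(rank * nse)}, {1}};
      StridedMemRefType<float, 1> vref{
          values.data(), values.data(), 0, {int64_t(nse)}, {1}};

      // Returns true iff the file contents were already sorted; if not,
      // the caller must sort the COO buffers (codegen emits sort_coo).
      return _mlir_ciface_getSparseTensorReaderReadToBuffers0F32(
          reader, &dim2lvlRef, &cref, &vref);
    }
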
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index 60544638b4e4f9..cf8b1ba87d3035 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -685,12 +685,15 @@ func.func @sparse_convert_element_type(%arg0: tensor<32xf32, #SparseVector>) ->
 //   CHECK-DAG: %[[A2:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[A3:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[A4:.*]] = arith.constant 2 : index
-//       CHECK: %[[A5:.*]] = call @createSparseTensorReader(%[[A0]])
-//       CHECK: %[[A6:.*]] = memref.alloca() : memref<2xindex>
-//       CHECK: %[[A7:.*]] = memref.cast %[[A6]] : memref<2xindex> to memref<?xindex>
-//       CHECK: call @copySparseTensorReaderDimSizes(%[[A5]], %[[A7]]) : (!llvm.ptr<i8>, memref<?xindex>) -> ()
-//       CHECK: %[[A8:.*]] = memref.load %[[A6]]{{\[}}%[[A3]]] : memref<2xindex>
-//       CHECK: %[[A9:.*]] = memref.load %[[A6]]{{\[}}%[[A2]]] : memref<2xindex>
+//   CHECK-DAG: %[[C2:.*]] = arith.constant 2 : i32
+//       CHECK: %[[D0:.*]] = memref.alloca() : memref<2xindex>
+//       CHECK: %[[D1:.*]] = memref.cast %[[D0]] : memref<2xindex> to memref<?xindex>
+//       CHECK: memref.store %[[A3]], %[[D0]][%[[A3]]] : memref<2xindex
+//       CHECK: memref.store %[[A3]], %[[D0]][%[[A2]]] : memref<2xindex>
+//       CHECK: %[[A5:.*]] = call @createCheckedSparseTensorReader(%[[A0]], %[[D1]], %[[C2]])
+//       CHECK: %[[D2:.*]] = call @getSparseTensorReaderDimSizes(%0) : (!llvm.ptr<i8>) -> memref<?xindex>
+//       CHECK: %[[A8:.*]] = memref.load %[[D2]]{{\[}}%[[A3]]] : memref<?xindex>
+//       CHECK: %[[A9:.*]] = memref.load %[[D2]]{{\[}}%[[A2]]] : memref<?xindex>
 //       CHECK: %[[A10:.*]] = call @getSparseTensorReaderNSE(%[[A5]])
 //       CHECK: %[[A11:.*]] = arith.muli %[[A10]], %[[A4]] : index
 //       CHECK: %[[A12:.*]] = memref.alloc() : memref<2xindex>
@@ -709,7 +712,7 @@ func.func @sparse_convert_element_type(%arg0: tensor<32xf32, #SparseVector>) ->
 //       CHECK: %[[A32:.*]] = memref.cast %[[A31]] : memref<2xindex> to memref<?xindex>
 //       CHECK: memref.store %[[A3]], %[[A31]]{{\[}}%[[A3]]] : memref<2xindex>
 //       CHECK: memref.store %[[A2]], %[[A31]]{{\[}}%[[A2]]] : memref<2xindex>
-//       CHECK: %[[A33:.*]] = call @getSparseTensorReaderRead0F32(%[[A5]], %[[A32]], %[[A14]], %[[A15]])
+//       CHECK: %[[A33:.*]] = call @getSparseTensorReaderReadToBuffers0F32(%[[A5]], %[[A32]], %[[A14]], %[[A15]])
 //       CHECK: %[[A34:.*]] = arith.cmpi eq, %[[A33]], %[[A1]] : i1
 //       CHECK: scf.if %[[A34]] {
 //       CHECK:   sparse_tensor.sort_coo  hybrid_quick_sort %[[A10]], %[[A14]] jointly %[[A15]] {nx = 2 : index, ny = 0 : index} : memref<?xindex> jointly memref<?xf32>
@@ -729,12 +732,15 @@ func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #Coo> {
 //   CHECK-DAG: %[[A1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[A2:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[A3:.*]] = arith.constant 2 : index
-//       CHECK: %[[A4:.*]] = call @createSparseTensorReader(%[[A0]])
-//       CHECK: %[[A5:.*]] = memref.alloca() : memref<2xindex>
-//       CHECK: %[[A6:.*]] = memref.cast %[[A5]] : memref<2xindex> to memref<?xindex>
-//       CHECK: call @copySparseTensorReaderDimSizes(%[[A4]], %[[A6]])
-//       CHECK: %[[A7:.*]] = memref.load %[[A5]]{{\[}}%[[A2]]] : memref<2xindex>
-//       CHECK: %[[A8:.*]] = memref.load %[[A5]]{{\[}}%[[A1]]] : memref<2xindex>
+//   CHECK-DAG: %[[C2:.*]] = arith.constant 2 : i32
+//       CHECK: %[[D0:.*]] = memref.alloca() : memref<2xindex>
+//       CHECK: %[[D1:.*]] = memref.cast %[[D0]] : memref<2xindex> to memref<?xindex>
+//       CHECK: memref.store %[[A2]], %[[D0]][%[[A2]]] : memref<2xindex
+//       CHECK: memref.store %[[A2]], %[[D0]][%[[A1]]] : memref<2xindex>
+//       CHECK: %[[A4:.*]] = call @createCheckedSparseTensorReader(%[[A0]], %[[D1]], %[[C2]])
+//       CHECK: %[[D2:.*]] = call @getSparseTensorReaderDimSizes(%0) : (!llvm.ptr<i8>) -> memref<?xindex>
+//       CHECK: %[[A7:.*]] = memref.load %[[D2]]{{\[}}%[[A2]]] : memref<?xindex>
+//       CHECK: %[[A8:.*]] = memref.load %[[D2]]{{\[}}%[[A1]]] : memref<?xindex>
 //       CHECK: %[[A9:.*]] = call @getSparseTensorReaderNSE(%[[A4]])
 //       CHECK: %[[A10:.*]] = arith.muli %[[A9]], %[[A3]] : index
 //       CHECK: %[[A11:.*]] = memref.alloc() : memref<2xindex>
@@ -753,7 +759,7 @@ func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #Coo> {
 //       CHECK: %[[A31:.*]] = memref.cast %[[A30]] : memref<2xindex> to memref<?xindex>
 //       CHECK: memref.store %[[A1]], %[[A30]]{{\[}}%[[A2]]] : memref<2xindex>
 //       CHECK: memref.store %[[A2]], %[[A30]]{{\[}}%[[A1]]] : memref<2xindex>
-//       CHECK: %[[A32:.*]] = call @getSparseTensorReaderRead0F32(%[[A4]], %[[A31]], %[[A13]], %[[A14]])
+//       CHECK: %[[A32:.*]] = call @getSparseTensorReaderReadToBuffers0F32(%[[A4]], %[[A31]], %[[A13]], %[[A14]])
 //       CHECK: memref.store %[[A9]], %[[A26]]{{\[}}%[[A1]]] : memref<?xindex>
 //       CHECK: %[[A34:.*]] = sparse_tensor.storage_specifier.set %[[A29]]  crd_mem_sz at 0 with %[[A10]]
 //       CHECK: %[[A36:.*]] = sparse_tensor.storage_specifier.set %[[A34]]  val_mem_sz with %[[A9]]

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
deleted file mode 100644
index 1b2fbe47639fc0..00000000000000
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
+++ /dev/null
@@ -1,229 +0,0 @@
-//--------------------------------------------------------------------------------------------------
-// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
-//
-// Set-up that's shared across all tests in this directory. In principle, this
-// config could be moved to lit.local.cfg. However, there are downstream users that
-//  do not use these LIT config files. Hence why this is kept inline.
-//
-// DEFINE: %{sparse_compiler_opts} = enable-runtime-library=true
-// DEFINE: %{sparse_compiler_opts_sve} = enable-arm-sve=true %{sparse_compiler_opts}
-// DEFINE: %{compile} = mlir-opt %s --sparse-compiler="%{sparse_compiler_opts}"
-// DEFINE: %{compile_sve} = mlir-opt %s --sparse-compiler="%{sparse_compiler_opts_sve}"
-// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
-// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
-// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
-//
-// DEFINE: %{env} =
-//--------------------------------------------------------------------------------------------------
-
-// REDEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" TENSOR1=""
-// RUN: %{compile} | %{env} %{run} | FileCheck %s
-//
-// Do the same run, but now with direct IR generation.
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false
-// RUN: %{compile} | %{env} %{run} | FileCheck %s
-//
-// Do the same run, but now with direct IR generation and vectorization.
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
-// RUN: %{compile} | %{env} %{run} | FileCheck %s
-//
-// Do the same run, but now with direct IR generation and VLA vectorization.
-// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %}
-
-!Filename = !llvm.ptr<i8>
-!TensorReader = !llvm.ptr<i8>
-!TensorWriter = !llvm.ptr<i8>
-
-module {
-
-  func.func private @getTensorFilename(index) -> (!Filename)
-
-  func.func private @createSparseTensorReader(!Filename) -> (!TensorReader)
-  func.func private @delSparseTensorReader(!TensorReader) -> ()
-  func.func private @getSparseTensorReaderRank(!TensorReader) -> (index)
-  func.func private @getSparseTensorReaderNSE(!TensorReader) -> (index)
-  func.func private @getSparseTensorReaderIsSymmetric(!TensorReader) -> (i1)
-  func.func private @copySparseTensorReaderDimSizes(!TensorReader,
-    memref<?xindex>) -> () attributes { llvm.emit_c_interface }
-  func.func private @getSparseTensorReaderRead0F32(!TensorReader,
-    memref<?xindex>, memref<?xindex>, memref<?xf32>)
-    -> (i1) attributes { llvm.emit_c_interface }
-  func.func private @getSparseTensorReaderNextF32(!TensorReader,
-    memref<?xindex>, memref<f32>) -> () attributes { llvm.emit_c_interface }
-
-  func.func private @createSparseTensorWriter(!Filename) -> (!TensorWriter)
-  func.func private @delSparseTensorWriter(!TensorWriter)
-  func.func private @outSparseTensorWriterMetaData(!TensorWriter, index, index,
-    memref<?xindex>) -> () attributes { llvm.emit_c_interface }
-  func.func private @outSparseTensorWriterNextF32(!TensorWriter, index,
-    memref<?xindex>, memref<f32>) -> () attributes { llvm.emit_c_interface }
-
-  func.func @dumpi(%arg0: memref<?xindex>) {
-    %c0 = arith.constant 0 : index
-    %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex>, vector<17xindex>
-    vector.print %v : vector<17xindex>
-    return
-  }
-
-  func.func @dumpi2(%arg0: memref<?xindex, strided<[2], offset: ?>>) {
-    %c0 = arith.constant 0 : index
-    %v = vector.transfer_read %arg0[%c0], %c0 :
-      memref<?xindex, strided<[2], offset: ?>>, vector<17xindex>
-    vector.print %v : vector<17xindex>
-    return
-  }
-
-  func.func @dumpf(%arg0: memref<?xf32>) {
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f32
-    %v = vector.transfer_read %arg0[%c0], %d0: memref<?xf32>, vector<17xf32>
-    vector.print %v : vector<17xf32>
-    return
-  }
-
-  // Returns the indices and values of the tensor.
-  func.func @readTensorFile(%tensor: !TensorReader)
-    -> (memref<?xindex>, memref<?xf32>, i1) {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %c2 = arith.constant 2 : index
-
-    %rank = call @getSparseTensorReaderRank(%tensor) : (!TensorReader) -> index
-    %nse = call @getSparseTensorReaderNSE(%tensor) : (!TensorReader) -> index
-
-    // Assume rank == 2.
-    %isize = arith.muli %c2, %nse : index
-    %xs = memref.alloc(%isize) : memref<?xindex>
-    %vs = memref.alloc(%nse) : memref<?xf32>
-    %dim2lvl = memref.alloca(%c2) : memref<?xindex>
-    memref.store %c0, %dim2lvl[%c0] : memref<?xindex>
-    memref.store %c1, %dim2lvl[%c1] : memref<?xindex>
-    %isSorted =func.call @getSparseTensorReaderRead0F32(%tensor, %dim2lvl, %xs, %vs)
-        : (!TensorReader, memref<?xindex>, memref<?xindex>, memref<?xf32>) -> (i1)
-    return %xs, %vs, %isSorted : memref<?xindex>, memref<?xf32>, i1
-  }
-
-  // Reads a COO tensor from the given file name and prints its content.
-  func.func @readTensorFileAndDump(%fileName: !Filename) {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %c2 = arith.constant 2 : index
-    %tensor = call @createSparseTensorReader(%fileName)
-      : (!Filename) -> (!TensorReader)
-    %rank = call @getSparseTensorReaderRank(%tensor) : (!TensorReader) -> index
-    vector.print %rank : index
-    %nse = call @getSparseTensorReaderNSE(%tensor) : (!TensorReader) -> index
-    vector.print %nse : index
-    %symmetric = call @getSparseTensorReaderIsSymmetric(%tensor)
-      : (!TensorReader) -> i1
-    vector.print %symmetric : i1
-    %dimSizes = memref.alloc(%rank) : memref<?xindex>
-    func.call @copySparseTensorReaderDimSizes(%tensor, %dimSizes)
-      : (!TensorReader, memref<?xindex>) -> ()
-    call @dumpi(%dimSizes) : (memref<?xindex>) -> ()
-
-    %xs, %vs, %isSorted = call @readTensorFile(%tensor)
-      : (!TensorReader) -> (memref<?xindex>, memref<?xf32>, i1)
-    %x0s = memref.subview %xs[%c0][%nse][%c2]
-      : memref<?xindex> to memref<?xindex, strided<[2], offset: ?>>
-    %x1s = memref.subview %xs[%c1][%nse][%c2]
-      : memref<?xindex> to memref<?xindex, strided<[2], offset: ?>>
-    vector.print %isSorted : i1
-    call @dumpi2(%x0s) : (memref<?xindex, strided<[2], offset: ?>>) -> ()
-    call @dumpi2(%x1s) : (memref<?xindex, strided<[2], offset: ?>>) -> ()
-    call @dumpf(%vs) : (memref<?xf32>) -> ()
-
-    // Release the resources.
-    call @delSparseTensorReader(%tensor) : (!TensorReader) -> ()
-    memref.dealloc %dimSizes : memref<?xindex>
-    memref.dealloc %xs : memref<?xindex>
-    memref.dealloc %vs : memref<?xf32>
-
-    return
-  }
-
-  // Reads a COO tensor from a file with fileName0 and writes its content to
-  // another file with fileName1.
-  func.func @createTensorFileFrom(%fileName0: !Filename, %fileName1: !Filename) {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-
-    %tensor0 = call @createSparseTensorReader(%fileName0)
-      : (!Filename) -> (!TensorReader)
-    %tensor1 = call @createSparseTensorWriter(%fileName1)
-      : (!Filename) -> (!TensorWriter)
-
-    %rank = call @getSparseTensorReaderRank(%tensor0) : (!TensorReader) -> index
-    %nse = call @getSparseTensorReaderNSE(%tensor0) : (!TensorReader) -> index
-    %dimSizes = memref.alloc(%rank) : memref<?xindex>
-    func.call @copySparseTensorReaderDimSizes(%tensor0, %dimSizes)
-      : (!TensorReader, memref<?xindex>) -> ()
-    call @outSparseTensorWriterMetaData(%tensor1, %rank, %nse, %dimSizes)
-      : (!TensorWriter, index, index, memref<?xindex>) -> ()
-
-    //TODO: handle isSymmetric.
-    // Assume rank == 2.
-    %indices = memref.alloc(%rank) : memref<?xindex>
-    %value = memref.alloca() : memref<f32>
-    scf.for %i = %c0 to %nse step %c1 {
-      func.call @getSparseTensorReaderNextF32(%tensor0, %indices, %value)
-        : (!TensorReader, memref<?xindex>, memref<f32>) -> ()
-      func.call @outSparseTensorWriterNextF32(%tensor1, %rank, %indices, %value)
-        : (!TensorWriter, index, memref<?xindex>, memref<f32>) -> ()
-    }
-
-    // Release the resources.
-    call @delSparseTensorReader(%tensor0) : (!TensorReader) -> ()
-    call @delSparseTensorWriter(%tensor1) : (!TensorWriter) -> ()
-    memref.dealloc %dimSizes : memref<?xindex>
-    memref.dealloc %indices : memref<?xindex>
-
-    return
-  }
-
-  func.func @entry() {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %fileName0 = call @getTensorFilename(%c0) : (index) -> (!Filename)
-    %fileName1 = call @getTensorFilename(%c1) : (index) -> (!Filename)
-
-    // Write the sparse tensor data from file through the SparseTensorReader and
-    // print the data.
-    // CHECK: 2
-    // CHECK: 17
-    // CHECK: 0
-    // CHECK: ( 4, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK: 1
-    // CHECK: ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 )
-    // CHECK: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255 )
-    // CHECK: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17 )
-    call @readTensorFileAndDump(%fileName0) : (!Filename) -> ()
-
-    // Write the sparse tensor data to std::cout through the SparseTensorWriter.
-    // CHECK: # extended FROSTT format
-    // CHECK: 2 17
-    // CHECK: 4 256
-    // CHECK: 1 1 -1
-    // CHECK: 1 127 2
-    // CHECK: 1 128 -3
-    // CHECK: 1 255 4
-    // CHECK: 2 2 -5
-    // CHECK: 2 254 6
-    // CHECK: 3 3 -7
-    // CHECK: 4 1 8
-    // CHECK: 4 2 -9
-    // CHECK: 4 4 10
-    // CHECK: 4 99 -11
-    // CHECK: 4 127 12
-    // CHECK: 4 128 -13
-    // CHECK: 4 129 14
-    // CHECK: 4 250 -15
-    // CHECK: 4 254 16
-    // CHECK: 4 256 -17
-    call @createTensorFileFrom(%fileName0, %fileName1)
-      : (!Filename, !Filename) -> ()
-
-    return
-  }
-}

