[Mlir-commits] [mlir] ca446e5 - [sparse][mlir] simplify sparse runtime support library

Aart Bik llvmlistbot at llvm.org
Tue May 25 09:39:21 PDT 2021


Author: Aart Bik
Date: 2021-05-25T09:39:14-07:00
New Revision: ca446e58c8417e942aca88a2d1572e3c12467b1f

URL: https://github.com/llvm/llvm-project/commit/ca446e58c8417e942aca88a2d1572e3c12467b1f
DIFF: https://github.com/llvm/llvm-project/commit/ca446e58c8417e942aca88a2d1572e3c12467b1f.diff

LOG: [sparse][mlir] simplify sparse runtime support library

Removed some of the older raw "MLIRized" versions that are
no longer needed now that the sparse runtime support library
can focus on the proper sparse tensor types rather than the
opaque pointer approach of the past. This avoids legacy...

Reviewed By: penpornk

Differential Revision: https://reviews.llvm.org/D102960
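
For readers skimming the diff below: the net effect on the C++ side is that the
file reader becomes an internal static helper that feeds the typed storage
constructor directly, instead of going through the removed opaque-pointer entry
points (openTensorC, readTensorItemC, closeTensor). The following is a minimal
stand-alone sketch of that shape only, not the actual library code; Coo and
Storage are hypothetical stand-ins for the library's SparseTensor and
SparseTensorStorage<P, I, V> classes.

  // Sketch only: mirrors the shape of the simplified reader path in
  // SparseUtils.cpp after this commit; Coo and Storage are hypothetical
  // stand-ins, not the real SparseTensor / SparseTensorStorage<P, I, V>.
  #include <cassert>
  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>
  #include <vector>

  namespace {

  // Coordinate-scheme tensor: one (indices, value) pair per nonzero element.
  struct Coo {
    uint64_t rank = 0;
    std::vector<std::vector<uint64_t>> indices;
    std::vector<double> values;
  };

  // Typed storage built from the coordinate scheme.
  template <typename P, typename I, typename V>
  struct Storage {
    Storage(const Coo &coo, const uint8_t *sparsity) {
      // A real implementation would compress `coo` into pointer/index/value
      // arrays of types P, I, V according to the per-dimension `sparsity`.
      (void)coo;
      (void)sparsity;
    }
  };

  // File reader, now an internal helper rather than an exported entry point:
  // parses the file straight into the coordinate scheme.
  Coo *openTensor(const char *filename, const uint64_t *perm) {
    FILE *file = std::fopen(filename, "r");
    if (!file) {
      std::fprintf(stderr, "cannot open %s\n", filename);
      std::exit(1);
    }
    auto *coo = new Coo();
    // ... parse the header and nonzeros, permuting indices by `perm` ...
    (void)perm;
    std::fclose(file);
    return coo;
  }

  } // namespace

  // Public entry point: reads the file and immediately packs the result into
  // typed storage, so no opaque coordinate-scheme pointer escapes to MLIR code.
  template <typename P, typename I, typename V>
  void *newSparseTensor(const char *filename, const uint8_t *sparsity,
                        const uint64_t *perm, uint64_t size) {
    Coo *coo = openTensor(filename, perm);
    assert(size == coo->rank && "sparsity array must match rank");
    auto *tensor = new Storage<P, I, V>(*coo, sparsity);
    delete coo;
    return tensor;
  }

The real newSparseTensor in SparseUtils.cpp follows the same pattern, as shown
in the second hunk of the diff.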

Added: 
    

Modified: 
    mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
    mlir/lib/ExecutionEngine/SparseUtils.cpp

Removed: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/frostt-example.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/matrix-market-example.mlir


################################################################################
diff --git a/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h b/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
index ffb450b1a3756..fb0b2a65a67eb 100644
--- a/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
@@ -348,14 +348,4 @@ extern "C" MLIR_CRUNNERUTILS_EXPORT void printNewline();
 extern "C" MLIR_CRUNNERUTILS_EXPORT void print_flops(double flops);
 extern "C" MLIR_CRUNNERUTILS_EXPORT double rtclock();
 
-//===----------------------------------------------------------------------===//
-// Small runtime support library for sparse tensors.
-//===----------------------------------------------------------------------===//
-extern "C" MLIR_CRUNNERUTILS_EXPORT void *
-openTensorC(char *filename, uint64_t *idata, uint64_t *perm);
-extern "C" MLIR_CRUNNERUTILS_EXPORT void
-readTensorItemC(void *tensor, uint64_t *idata, double *ddata);
-extern "C" MLIR_CRUNNERUTILS_EXPORT void closeTensor(void *tensor);
-extern "C" MLIR_CRUNNERUTILS_EXPORT char *getTensorFilename(uint64_t id);
-
 #endif // EXECUTIONENGINE_CRUNNERUTILS_H_

diff --git a/mlir/lib/ExecutionEngine/SparseUtils.cpp b/mlir/lib/ExecutionEngine/SparseUtils.cpp
index 3c414f691c5bf..1c425e2e90df1 100644
--- a/mlir/lib/ExecutionEngine/SparseUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseUtils.cpp
@@ -241,20 +241,6 @@ class SparseTensorStorage : public SparseTensorStorageBase {
   std::vector<V> values;
 };
 
-/// Templated reader.
-template <typename P, typename I, typename V>
-void *newSparseTensor(char *filename, uint8_t *sparsity, uint64_t *perm,
-                      uint64_t size) {
-  uint64_t idata[64];
-  SparseTensor *t =
-      static_cast<SparseTensor *>(openTensorC(filename, idata, perm));
-  assert(size == t->getRank()); // sparsity array must match rank
-  SparseTensorStorageBase *tensor =
-      new SparseTensorStorage<P, I, V>(t, sparsity);
-  delete t;
-  return tensor;
-}
-
 /// Helper to convert string to lower case.
 static char *toLower(char *token) {
   for (char *c = token; *c; c++)
@@ -332,48 +318,9 @@ static void readExtFROSTTHeader(FILE *file, char *name, uint64_t *idata) {
   }
 }
 
-} // anonymous namespace
-
-//===----------------------------------------------------------------------===//
-//
-// Public API of the sparse runtime support library that enables MLIR code
-// to read a sparse tensor from an external format (MME for FROSTT).
-//
-// For example, a sparse matrix in MME can be read as follows.
-//
-//   %tensor = call @openTensor(%fileName, %idata)
-//     : (!llvm.ptr<i8>, memref<?xindex>) -> (!llvm.ptr<i8>)
-//   %rank = load %idata[%c0] : memref<?xindex>    # always 2 for MME
-//   %nnz  = load %idata[%c1] : memref<?xindex>
-//   %m    = load %idata[%c2] : memref<?xindex>
-//   %n    = load %idata[%c3] : memref<?xindex>
-//   .. prepare reading in m x n sparse tensor A with nnz nonzero elements ..
-//   scf.for %k = %c0 to %nnz step %c1 {
-//     call @readTensorItem(%tensor, %idata, %ddata)
-//       : (!llvm.ptr<i8>, memref<?xindex>, memref<?xf64>) -> ()
-//     %i = load %idata[%c0] : memref<?xindex>
-//     %j = load %idata[%c1] : memref<?xindex>
-//     %d = load %ddata[%c0] : memref<?xf64>
-//     .. process next nonzero element A[i][j] = d
-//        where the elements appear in lexicographic order ..
-//   }
-//   call @closeTensor(%tensor) : (!llvm.ptr<i8>) -> ()
-//
-//
-// Note that input parameters in the "MLIRized" version of a function mimic
-// the data layout of a MemRef<?xT> (but cannot use a direct struct). The
-// output parameter uses a direct struct.
-//
-//===----------------------------------------------------------------------===//
-
-extern "C" {
-
-/// Reads in a sparse tensor with the given filename. The call yields a
-/// pointer to an opaque memory-resident sparse tensor object that is only
-/// understood by other methods in the sparse runtime support library. An
-/// array parameter is used to pass the rank, the number of nonzero elements,
-/// and the dimension sizes (one per rank).
-void *openTensorC(char *filename, uint64_t *idata, uint64_t *perm) {
+/// Reads a sparse tensor with the given filename into a memory-resident
+/// sparse tensor in coordinate scheme.
+static SparseTensor *openTensor(char *filename, uint64_t *perm) {
   // Open the file.
   FILE *file = fopen(filename, "r");
   if (!file) {
@@ -381,6 +328,7 @@ void *openTensorC(char *filename, uint64_t *idata, uint64_t *perm) {
     exit(1);
   }
   // Perform some file format dependent set up.
+  uint64_t idata[512];
   if (strstr(filename, ".mtx")) {
     readMMEHeader(file, filename, idata);
   } else if (strstr(filename, ".tns")) {
@@ -395,10 +343,7 @@ void *openTensorC(char *filename, uint64_t *idata, uint64_t *perm) {
   uint64_t nnz = idata[1];
   std::vector<uint64_t> indices(rank);
   for (uint64_t r = 0; r < rank; r++)
-    if (perm)
-      indices[perm[r]] = idata[2 + r];
-    else
-      indices[r] = idata[2 + r];
+    indices[perm[r]] = idata[2 + r];
   SparseTensor *tensor = new SparseTensor(indices, nnz);
   // Read all nonzero elements.
   for (uint64_t k = 0; k < nnz; k++) {
@@ -409,10 +354,7 @@ void *openTensorC(char *filename, uint64_t *idata, uint64_t *perm) {
         exit(1);
       }
       // Add 0-based index.
-      if (perm)
-        indices[perm[r]] = idx - 1;
-      else
-        indices[r] = idx - 1;
+      indices[perm[r]] = idx - 1;
     }
     double value;
     if (fscanf(file, "%lg\n", &value) != 1) {
@@ -427,33 +369,21 @@ void *openTensorC(char *filename, uint64_t *idata, uint64_t *perm) {
   return tensor;
 }
 
-/// "MLIRized" version.
-void *openTensor(char *filename, uint64_t *ibase, uint64_t *idata,
-                 uint64_t ioff, uint64_t isize, uint64_t istride) {
-  assert(istride == 1);
-  return openTensorC(filename, idata + ioff, nullptr);
-}
-
-/// Yields the next element from the given opaque sparse tensor object.
-void readTensorItemC(void *tensor, uint64_t *idata, double *ddata) {
-  const Element &e = static_cast<SparseTensor *>(tensor)->next();
-  for (uint64_t r = 0, rank = e.indices.size(); r < rank; r++)
-    idata[r] = e.indices[r];
-  ddata[0] = e.value;
+/// Templated reader.
+template <typename P, typename I, typename V>
+void *newSparseTensor(char *filename, uint8_t *sparsity, uint64_t *perm,
+                      uint64_t size) {
+  SparseTensor *t = openTensor(filename, perm);
+  assert(size == t->getRank()); // sparsity array must match rank
+  SparseTensorStorageBase *tensor =
+      new SparseTensorStorage<P, I, V>(t, sparsity);
+  delete t;
+  return tensor;
 }
 
-/// "MLIRized" version.
-void readTensorItem(void *tensor, uint64_t *ibase, uint64_t *idata,
-                    uint64_t ioff, uint64_t isize, uint64_t istride,
-                    double *dbase, double *ddata, uint64_t doff, uint64_t dsize,
-                    uint64_t dstride) {
-  assert(istride == 1 && dstride == 1);
-  readTensorItemC(tensor, idata + ioff, ddata + doff);
-}
+} // anonymous namespace
 
-/// Closes the given opaque sparse tensor object, releasing its memory
-/// resources. After this call, the opaque object cannot be used anymore.
-void closeTensor(void *tensor) { delete static_cast<SparseTensor *>(tensor); }
+extern "C" {
 
 /// Helper method to read a sparse tensor filename from the environment,
 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/frostt-example.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/frostt-example.mlir
deleted file mode 100644
index ed204d2d26492..0000000000000
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/frostt-example.mlir
+++ /dev/null
@@ -1,153 +0,0 @@
-// RUN: mlir-opt %s \
-// RUN:  -convert-scf-to-std -convert-vector-to-scf \
-// RUN:  -convert-linalg-to-llvm -convert-vector-to-llvm -convert-std-to-llvm | \
-// RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
-// RUN: mlir-cpu-runner \
-// RUN:  -e entry -entry-point-result=void  \
-// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
-// RUN: FileCheck %s
-
-//
-// Use descriptive names for opaque pointers.
-//
-!Filename = type !llvm.ptr<i8>
-!Tensor   = type !llvm.ptr<i8>
-
-module {
-  //
-  // Example of using the sparse runtime support library to read a sparse tensor
-  // in the FROSTT file format (http://frostt.io/tensors/file-formats.html).
-  //
-  func private @getTensorFilename(index) -> (!Filename)
-  func private @openTensor(!Filename, memref<?xindex>) -> (!Tensor)
-  func private @readTensorItem(!Tensor, memref<?xindex>, memref<?xf64>) -> ()
-  func private @closeTensor(!Tensor) -> ()
-
-  func @entry() {
-    %d0  = constant 0.0 : f64
-    %i0  = constant 0   : i64
-    %c0  = constant 0   : index
-    %c1  = constant 1   : index
-    %c2  = constant 2   : index
-    %c10 = constant 10  : index
-
-    //
-    // Setup memrefs to get meta data, indices and values.
-    // The index array should provide sufficient space.
-    //
-    %idata = memref.alloc(%c10) : memref<?xindex>
-    %ddata = memref.alloc(%c1)  : memref<?xf64>
-
-    //
-    // Obtain the sparse tensor filename through this test helper.
-    //
-    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
-
-    //
-    // Read a sparse tensor. The call yields a pointer to an opaque
-    // memory-resident sparse tensor object that is only understood by
-    // other methods in the sparse runtime support library. This call also
-    // provides the rank and the number of nonzero elements (nnz) through
-    // a memref array.
-    //
-    %tensor = call @openTensor(%fileName, %idata) : (!Filename, memref<?xindex>) -> (!Tensor)
-
-    //
-    // Print some meta data.
-    //
-    %rank = memref.load %idata[%c0] : memref<?xindex>
-    %nnz  = memref.load %idata[%c1] : memref<?xindex>
-    vector.print %rank : index
-    vector.print %nnz  : index
-    scf.for %r = %c2 to %c10 step %c1 {
-      %d = memref.load %idata[%r] : memref<?xindex>
-      vector.print %d : index
-    }
-
-    //
-    // Now we are ready to read in the nonzero elements of the sparse tensor
-    // and insert these into a sparse storage scheme. In this example, we
-    // simply print the elements on the fly.
-    //
-    scf.for %k = %c0 to %nnz step %c1 {
-      call @readTensorItem(%tensor, %idata, %ddata) : (!Tensor, memref<?xindex>, memref<?xf64>) -> ()
-      //
-      // Build index vector and print element (here, using the
-      // knowledge that the read sparse tensor has rank 8).
-      //
-      %0 = vector.broadcast %i0 : i64 to vector<8xi64>
-      %1 = scf.for %r = %c0 to %rank step %c1 iter_args(%in = %0) -> vector<8xi64> {
-        %i  = memref.load %idata[%r] : memref<?xindex>
-        %ii = index_cast %i : index to i64
-        %ri = index_cast %r : index to i32
-        %out = vector.insertelement %ii, %in[%ri : i32] : vector<8xi64>
-        scf.yield %out : vector<8xi64>
-      }
-      %2 = memref.load %ddata[%c0] : memref<?xf64>
-      vector.print %1 : vector<8xi64>
-      vector.print %2 : f64
-    }
-
-    //
-    // Since at this point we have processed the contents, make sure to
-    // close the sparse tensor to release its memory resources.
-    //
-    call @closeTensor(%tensor) : (!Tensor) -> ()
-
-    //
-    // Verify that the results are as expected.
-    //
-    // CHECK: 8
-    // CHECK: 16
-    // CHECK: 7
-    // CHECK: 3
-    // CHECK: 3
-    // CHECK: 3
-    // CHECK: 3
-    // CHECK: 3
-    // CHECK: 5
-    // CHECK: 3
-    //
-    // CHECK:      ( 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: 1
-    // CHECK-NEXT: ( 0, 0, 0, 0, 0, 0, 0, 2 )
-    // CHECK-NEXT: 1.3
-    // CHECK-NEXT: ( 0, 0, 0, 0, 0, 0, 4, 0 )
-    // CHECK-NEXT: 1.5
-    // CHECK-NEXT: ( 0, 0, 0, 1, 0, 0, 0, 1 )
-    // CHECK-NEXT: 1.22
-    // CHECK-NEXT: ( 0, 0, 0, 1, 0, 0, 0, 2 )
-    // CHECK-NEXT: 1.23
-    // CHECK-NEXT: ( 1, 0, 1, 0, 1, 1, 1, 0 )
-    // CHECK-NEXT: 2.111
-    // CHECK-NEXT: ( 1, 0, 1, 0, 1, 1, 1, 2 )
-    // CHECK-NEXT: 2.113
-    // CHECK-NEXT: ( 1, 1, 1, 0, 1, 1, 1, 0 )
-    // CHECK-NEXT: 2.11
-    // CHECK-NEXT: ( 1, 1, 1, 0, 1, 1, 1, 1 )
-    // CHECK-NEXT: 2.1
-    // CHECK-NEXT: ( 1, 1, 1, 1, 1, 1, 1, 1 )
-    // CHECK-NEXT: 2
-    // CHECK-NEXT: ( 2, 2, 2, 2, 0, 0, 1, 2 )
-    // CHECK-NEXT: 3.112
-    // CHECK-NEXT: ( 2, 2, 2, 2, 0, 1, 0, 2 )
-    // CHECK-NEXT: 3.121
-    // CHECK-NEXT: ( 2, 2, 2, 2, 0, 1, 1, 2 )
-    // CHECK-NEXT: 3.122
-    // CHECK-NEXT: ( 2, 2, 2, 2, 0, 2, 2, 2 )
-    // CHECK-NEXT: 3.1
-    // CHECK-NEXT: ( 2, 2, 2, 2, 2, 2, 2, 2 )
-    // CHECK-NEXT: 3
-    // CHECK-NEXT: ( 6, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: 7
-    //
-
-    //
-    // Free.
-    //
-    memref.dealloc %idata : memref<?xindex>
-    memref.dealloc %ddata : memref<?xf64>
-
-    return
-  }
-}

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/matrix-market-example.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/matrix-market-example.mlir
deleted file mode 100644
index 719340354e2fb..0000000000000
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/matrix-market-example.mlir
+++ /dev/null
@@ -1,121 +0,0 @@
-// RUN: mlir-opt %s \
-// RUN:  -convert-scf-to-std -convert-vector-to-scf \
-// RUN:  -convert-linalg-to-llvm -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-std-to-llvm | \
-// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
-// RUN: mlir-cpu-runner \
-// RUN:  -e entry -entry-point-result=void  \
-// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
-// RUN: FileCheck %s
-
-//
-// Use descriptive names for opaque pointers.
-//
-!Filename = type !llvm.ptr<i8>
-!Tensor   = type !llvm.ptr<i8>
-
-module {
-  //
-  // Example of using the sparse runtime support library to read a sparse matrix
-  // in the Matrix Market Exchange Format (https://math.nist.gov/MatrixMarket).
-  //
-  func private @getTensorFilename(index) -> (!Filename)
-  func private @openTensor(!Filename, memref<?xindex>) -> (!Tensor)
-  func private @readTensorItem(!Tensor, memref<?xindex>, memref<?xf64>) -> ()
-  func private @closeTensor(!Tensor) -> ()
-
-  func @entry() {
-    %d0  = constant 0.0 : f64
-    %c0  = constant 0 : index
-    %c1  = constant 1 : index
-    %c2  = constant 2 : index
-    %c3  = constant 3 : index
-    %c4  = constant 4 : index
-    %c5  = constant 5 : index
-
-    //
-    // Setup memrefs to get meta data, indices, and values.
-    //
-    %idata = memref.alloc(%c4) : memref<?xindex>
-    %ddata = memref.alloc(%c1) : memref<?xf64>
-
-    //
-    // Obtain the sparse matrix filename through this test helper.
-    //
-    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
-
-    //
-    // Read a sparse matrix. The call yields a pointer to an opaque
-    // memory-resident sparse tensor object that is only understood by
-    // other methods in the sparse runtime support library. This call also
-    // provides the rank (always 2 for the Matrix Market), number of
-    // nonzero elements (nnz), and the size (m x n) through a memref array.
-    //
-    %tensor = call @openTensor(%fileName, %idata) : (!Filename, memref<?xindex>) -> (!Tensor)
-    %rank = memref.load %idata[%c0] : memref<?xindex>
-    %nnz  = memref.load %idata[%c1] : memref<?xindex>
-    %m    = memref.load %idata[%c2] : memref<?xindex>
-    %n    = memref.load %idata[%c3] : memref<?xindex>
-
-    //
-    // At this point, code should prepare a proper sparse storage scheme for
-    // an m x n matrix with nnz nonzero elements. For simplicity, here we
-    // simply intialize a dense m x n matrix to all zeroes.
-    //
-    %a = memref.alloc(%m, %n) : memref<?x?xf64>
-    scf.for %ii = %c0 to %m step %c1 {
-      scf.for %jj = %c0 to %n step %c1 {
-        memref.store %d0, %a[%ii, %jj] : memref<?x?xf64>
-      }
-    }
-
-    //
-    // Now we are ready to read in nnz nonzero elements of the sparse matrix
-    // and insert these into a sparse storage scheme. In this example, we
-    // simply insert them in the dense matrix.
-    //
-    scf.for %k = %c0 to %nnz step %c1 {
-      call @readTensorItem(%tensor, %idata, %ddata) : (!Tensor, memref<?xindex>, memref<?xf64>) -> ()
-      %i = memref.load %idata[%c0] : memref<?xindex>
-      %j = memref.load %idata[%c1] : memref<?xindex>
-      %d = memref.load %ddata[%c0] : memref<?xf64>
-      memref.store %d, %a[%i, %j] : memref<?x?xf64>
-    }
-
-    //
-    // Since at this point we have copied the sparse matrix to our own
-    // storage scheme, make sure to close the matrix to release its
-    // memory resources.
-    //
-    call @closeTensor(%tensor) : (!Tensor) -> ()
-
-    //
-    // Verify that the results are as expected.
-    //
-    %A = vector.transfer_read %a[%c0, %c0], %d0 : memref<?x?xf64>, vector<5x5xf64>
-    vector.print %rank : index
-    vector.print %nnz  : index
-    vector.print %m    : index
-    vector.print %n    : index
-    vector.print %A    : vector<5x5xf64>
-    //
-    // CHECK: 2
-    // CHECK: 9
-    // CHECK: 5
-    // CHECK: 5
-    //
-    // CHECK:      ( ( 1, 0, 0, 1.4, 0 ),
-    // CHECK-SAME:   ( 0, 2, 0, 0, 2.5 ),
-    // CHECK-SAME:   ( 0, 0, 3, 0, 0 ),
-    // CHECK-SAME:   ( 4.1, 0, 0, 4, 0 ),
-    // CHECK-SAME:   ( 0, 5.2, 0, 0, 5 ) )
-
-    //
-    // Free.
-    //
-    memref.dealloc %idata : memref<?xindex>
-    memref.dealloc %ddata : memref<?xf64>
-    memref.dealloc %a     : memref<?x?xf64>
-
-    return
-  }
-}


        

