[Mlir-commits] [mlir] 9f1f21c - [mlir][sparse] Replace pass-by-value with pass-by-memref for C interface routines to fix Windows build.

llvmlistbot at llvm.org llvmlistbot at llvm.org
Wed Oct 12 14:52:27 PDT 2022


Author: bixia1
Date: 2022-10-12T14:52:19-07:00
New Revision: 9f1f21c49de3e2227702be125b3730c8c5133f27

URL: https://github.com/llvm/llvm-project/commit/9f1f21c49de3e2227702be125b3730c8c5133f27
DIFF: https://github.com/llvm/llvm-project/commit/9f1f21c49de3e2227702be125b3730c8c5133f27.diff

LOG: [mlir][sparse] Replace pass-by-value with pass-by-memref for C interface routines to fix Windows build.

Reviewed By: aartbik, wrengr

Differential Revision: https://reviews.llvm.org/D135811

Added: 
    

Modified: 
    mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
    mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index 9da0300d903dd..ba2dc51bb3b93 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -240,8 +240,9 @@ MLIR_CRUNNERUTILS_EXPORT void delSparseTensorReader(void *p);
 
 /// Returns the next element for the sparse tensor being read.
 #define IMPL_GETNEXT(VNAME, V)                                                 \
-  MLIR_CRUNNERUTILS_EXPORT V _mlir_ciface_getSparseTensorReaderNext##VNAME(    \
-      void *p, StridedMemRefType<index_type, 1> *iref);
+  MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_getSparseTensorReaderNext##VNAME( \
+      void *p, StridedMemRefType<index_type, 1> *iref,                         \
+      StridedMemRefType<V, 0> *vref);
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
 #undef IMPL_GETNEXT
 
@@ -266,7 +267,7 @@ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_outSparseTensorWriterMetaData(
 #define IMPL_OUTNEXT(VNAME, V)                                                 \
   MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_outSparseTensorWriterNext##VNAME( \
       void *p, index_type rank, StridedMemRefType<index_type, 1> *iref,        \
-      V value);
+      StridedMemRefType<V, 0> *vref);
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_OUTNEXT)
 #undef IMPL_OUTNEXT
 

diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index 84aa114533d8e..0191fd144a17e 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -618,9 +618,10 @@ void delSparseTensorReader(void *p) {
 }
 
 #define IMPL_GETNEXT(VNAME, V)                                                 \
-  V _mlir_ciface_getSparseTensorReaderNext##VNAME(                             \
-      void *p, StridedMemRefType<index_type, 1> *iref) {                       \
-    assert(p &&iref);                                                          \
+  void _mlir_ciface_getSparseTensorReaderNext##VNAME(                          \
+      void *p, StridedMemRefType<index_type, 1> *iref,                         \
+      StridedMemRefType<V, 0> *vref) {                                         \
+    assert(p &&iref &&vref);                                                   \
     assert(iref->strides[0] == 1);                                             \
     index_type *indices = iref->data + iref->offset;                           \
     SparseTensorReader *stfile = static_cast<SparseTensorReader *>(p);         \
@@ -630,7 +631,8 @@ void delSparseTensorReader(void *p) {
       uint64_t idx = strtoul(linePtr, &linePtr, 10);                           \
       indices[r] = idx - 1;                                                    \
     }                                                                          \
-    return detail::readCOOValue<V>(&linePtr, stfile->isPattern());             \
+    V *value = vref->data + vref->offset;                                      \
+    *value = detail::readCOOValue<V>(&linePtr, stfile->isPattern());           \
   }
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
 #undef IMPL_GETNEXT
@@ -667,14 +669,15 @@ void _mlir_ciface_outSparseTensorWriterMetaData(
 #define IMPL_OUTNEXT(VNAME, V)                                                 \
   void _mlir_ciface_outSparseTensorWriterNext##VNAME(                          \
       void *p, index_type rank, StridedMemRefType<index_type, 1> *iref,        \
-      V value) {                                                               \
-    assert(p &&iref);                                                          \
+      StridedMemRefType<V, 0> *vref) {                                         \
+    assert(p &&iref &&vref);                                                   \
     assert(iref->strides[0] == 1);                                             \
     index_type *indices = iref->data + iref->offset;                           \
     SparseTensorWriter &file = *static_cast<SparseTensorWriter *>(p);          \
     for (uint64_t r = 0; r < rank; ++r)                                        \
       file << (indices[r] + 1) << " ";                                         \
-    file << value << std::endl;                                                \
+    V *value = vref->data + vref->offset;                                      \
+    file << *value << std::endl;                                               \
   }
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_OUTNEXT)
 #undef IMPL_OUTNEXT

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
index e30072ddc9d6e..8e263b529e822 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
@@ -19,17 +19,17 @@ module {
   func.func private @getSparseTensorReaderRank(!TensorReader) -> (index)
   func.func private @getSparseTensorReaderNNZ(!TensorReader) -> (index)
   func.func private @getSparseTensorReaderIsSymmetric(!TensorReader) -> (i1)
-  func.func private @getSparseTensorReaderDimSizes(!TensorReader, memref<?xindex>)
-    -> () attributes { llvm.emit_c_interface }
-  func.func private @getSparseTensorReaderNextF32(!TensorReader, memref<?xindex>)
-    -> (f32) attributes { llvm.emit_c_interface }
+  func.func private @getSparseTensorReaderDimSizes(!TensorReader,
+    memref<?xindex>) -> () attributes { llvm.emit_c_interface }
+  func.func private @getSparseTensorReaderNextF32(!TensorReader,
+    memref<?xindex>, memref<f32>) -> () attributes { llvm.emit_c_interface }
 
   func.func private @createSparseTensorWriter(!Filename) -> (!TensorWriter)
   func.func private @delSparseTensorWriter(!TensorWriter)
   func.func private @outSparseTensorWriterMetaData(!TensorWriter, index, index,
     memref<?xindex>) -> () attributes { llvm.emit_c_interface }
   func.func private @outSparseTensorWriterNextF32(!TensorWriter, index,
-    memref<?xindex>, f32) -> () attributes { llvm.emit_c_interface }
+    memref<?xindex>, memref<f32>) -> () attributes { llvm.emit_c_interface }
 
   func.func @dumpi(%arg0: memref<?xindex>) {
     %c0 = arith.constant 0 : index
@@ -60,9 +60,13 @@ module {
     %x1s = memref.alloc(%nnz) : memref<?xindex>
     %vs = memref.alloc(%nnz) : memref<?xf32>
     %indices = memref.alloc(%rank) : memref<?xindex>
+    %value = memref.alloca() : memref<f32>
     scf.for %i = %c0 to %nnz step %c1 {
-      %v = func.call @getSparseTensorReaderNextF32(%tensor, %indices)
-        : (!TensorReader, memref<?xindex>) -> f32
+      func.call @getSparseTensorReaderNextF32(%tensor, %indices, %value)
+        : (!TensorReader, memref<?xindex>, memref<f32>) -> ()
+      // TODO: can we use memref.subview to avoid the need for the %value
+      //       buffer?
+      %v = memref.load %value[] : memref<f32>
       memref.store %v, %vs[%i] : memref<?xf32>
       %i0 = memref.load %indices[%c0] : memref<?xindex>
       memref.store %i0, %x0s[%i] : memref<?xindex>
@@ -129,11 +133,12 @@ module {
     //TODO: handle isSymmetric.
     // Assume rank == 2.
     %indices = memref.alloc(%rank) : memref<?xindex>
+    %value = memref.alloca() : memref<f32>
     scf.for %i = %c0 to %nnz step %c1 {
-      %v = func.call @getSparseTensorReaderNextF32(%tensor0, %indices)
-        : (!TensorReader, memref<?xindex>) -> f32
-      func.call @outSparseTensorWriterNextF32(%tensor1, %rank, %indices, %v)
-        : (!TensorWriter, index, memref<?xindex>, f32) -> ()
+      func.call @getSparseTensorReaderNextF32(%tensor0, %indices, %value)
+        : (!TensorReader, memref<?xindex>, memref<f32>) -> ()
+      func.call @outSparseTensorWriterNextF32(%tensor1, %rank, %indices, %value)
+        : (!TensorWriter, index, memref<?xindex>, memref<f32>) -> ()
     }
 
     // Release the resources.


        


More information about the Mlir-commits mailing list