[Mlir-commits] [mlir] b13cbf5 - [mlir][sparse] integration test for "simply dynamic" sparse output tensors

Aart Bik llvmlistbot at llvm.org
Tue Jun 22 14:28:11 PDT 2021


Author: Aart Bik
Date: 2021-06-22T14:28:02-07:00
New Revision: b13cbf537f5da58d091dfe4a4642ab19aea582fa

URL: https://github.com/llvm/llvm-project/commit/b13cbf537f5da58d091dfe4a4642ab19aea582fa
DIFF: https://github.com/llvm/llvm-project/commit/b13cbf537f5da58d091dfe4a4642ab19aea582fa.diff

LOG: [mlir][sparse] integration test for "simply dynamic" sparse output tensors

Reviewed By: gussmith23

Differential Revision: https://reviews.llvm.org/D104583

Added: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir

Modified: 
    

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
new file mode 100644
index 0000000000000..fc196f2999ce5
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
@@ -0,0 +1,77 @@
+// RUN: mlir-opt %s \
+// RUN:   --sparsification --sparse-tensor-conversion \
+// RUN:   --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \
+// RUN:   --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
+// RUN:   --std-bufferize --finalizing-bufferize  \
+// RUN:   --convert-vector-to-llvm --convert-std-to-llvm | \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+!Filename = type !llvm.ptr<i8>
+
+#DCSR = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed", "compressed" ],
+  dimOrdering = affine_map<(i,j) -> (i,j)>
+}>
+
+#eltwise_mult = {
+  indexing_maps = [
+    affine_map<(i,j) -> (i,j)>  // X (out)
+  ],
+  iterator_types = ["parallel", "parallel"],
+  doc = "X(i,j) = X(i,j) * X(i,j)"
+}
+
+//
+// Integration test that lowers a kernel annotated as sparse to
+// actual sparse code, initializes a matching sparse storage scheme
+// from file, and runs the resulting code with the JIT compiler.
+//
+module {
+  //
+  // A kernel that multiplies a sparse matrix A with itself
+  // in an element-wise fashion. In this operation, we have
+  // a sparse tensor as output; although the values of the
+  // sparse tensor change, its nonzero structure remains the same.
+  //
+  func @kernel_eltwise_mult(%argx: tensor<?x?xf64, #DCSR> {linalg.inplaceable = true})
+    -> tensor<?x?xf64, #DCSR> {
+    %0 = linalg.generic #eltwise_mult
+      outs(%argx: tensor<?x?xf64, #DCSR>) {
+      ^bb(%x: f64):
+        %0 = mulf %x, %x : f64
+        linalg.yield %0 : f64
+    } -> tensor<?x?xf64, #DCSR>
+    return %0 : tensor<?x?xf64, #DCSR>
+  }
+
+  func private @getTensorFilename(index) -> (!Filename)
+
+  //
+  // Main driver that reads matrix from file and calls the sparse kernel.
+  //
+  func @entry() {
+    %d0 = constant 0.0 : f64
+    %c0 = constant 0 : index
+
+    // Read the sparse matrix from file, construct sparse storage.
+    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
+    %x = sparse_tensor.new %fileName : !llvm.ptr<i8> to tensor<?x?xf64, #DCSR>
+
+    // Call kernel.
+    %0 = call @kernel_eltwise_mult(%x) : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>
+
+    // Print the result for verification.
+    //
+    // CHECK: ( 1, 1.96, 4, 6.25, 9, 16.81, 16, 27.04, 25 )
+    //
+    %m = sparse_tensor.values %0 : tensor<?x?xf64, #DCSR> to memref<?xf64>
+    %v = vector.transfer_read %m[%c0], %d0: memref<?xf64>, vector<9xf64>
+    vector.print %v : vector<9xf64>
+
+    return
+  }
+}


        


More information about the Mlir-commits mailing list