[Mlir-commits] [mlir] da74956 - [mlir][sparse] Extend more integration tests to run on the codegen path.

llvmlistbot at llvm.org
Fri Nov 11 09:42:28 PST 2022


Author: bixia1
Date: 2022-11-11T09:42:23-08:00
New Revision: da749560e11b3a581e1dfc87d497a14a99be6d59

URL: https://github.com/llvm/llvm-project/commit/da749560e11b3a581e1dfc87d497a14a99be6d59
DIFF: https://github.com/llvm/llvm-project/commit/da749560e11b3a581e1dfc87d497a14a99be6d59.diff

LOG: [mlir][sparse] Extend more integration tests to run on the codegen path.

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D137850
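
In summary, each of the listed integration tests now runs twice: once through
the runtime-library path (enable-runtime-library=true) and once through the
codegen path (enable-runtime-library=false). A condensed sketch of the
duplicated RUN-line pattern, as it appears in the hunks below (the exact
pipeline options vary slightly per test):

    // RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
    // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
    // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
    // RUN: FileCheck %s

    // RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \
    // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
    // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
    // RUN: FileCheck %s

Some tests additionally pass enable-buffer-initialization=true on the codegen
path and switch the vector.transfer_read padding constants (and the
corresponding CHECK values) from -1 to 0, presumably so that both paths print
identical output.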

Added: 
    

Modified: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index 947360d2fecc..ed799090ca3f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -1,4 +1,11 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \
 // RUN: TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
index c608590c7962..5a721262f84d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
@@ -1,4 +1,10 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
index 6438cfb0ec37..260136ccf062 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
@@ -1,4 +1,10 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
index fed119edbb2d..8bab24d4ff38 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
@@ -1,5 +1,12 @@
-// RUN: mlir-opt %s --sparse-compiler | \
-// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
 

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
index b10cca1ac5a6..abc1ae24854f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
@@ -1,4 +1,10 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler="enable-runtime-library=false enable-buffer-initialization=true" | \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
@@ -26,7 +32,7 @@ module {
   //
   func.func @dump(%arg0: memref<?xf64>) {
     %c = arith.constant 0 : index
-    %d = arith.constant -1.0 : f64
+    %d = arith.constant 0.0 : f64
     %0 = vector.transfer_read %arg0[%c], %d: memref<?xf64>, vector<8xf64>
     vector.print %0 : vector<8xf64>
     return
@@ -51,12 +57,12 @@ module {
     //
     // All proper row-/column-wise?
     //
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
-    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
-    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
-    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
+    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 0 )
+    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, 0 )
+    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 0 )
+    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, 0 )
+    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, 0 )
+    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 0 )
     //
     %m1 = sparse_tensor.values %1 : tensor<?x?xf64, #DCSR> to memref<?xf64>
     %m2 = sparse_tensor.values %2 : tensor<?x?xf64, #DCSC> to memref<?xf64>

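A note on the padding change in this file (and the analogous changes in the
files below): the pad operand of vector.transfer_read is only substituted for
lanes that fall out of bounds of the source memref. On the codegen path the
values buffer can be over-allocated relative to the number of stored entries,
so the trailing lane of the 8-wide read comes from the buffer itself; with
enable-buffer-initialization=true that slot is zero, and switching the pad
constant from -1.0 to 0.0 keeps the runtime-library path printing the same
trailing 0. A minimal illustrative dump routine under that assumption
(hypothetical, not part of this commit):

    func.func @dump_tail(%buf: memref<?xf64>) {
      %c0 = arith.constant 0 : index
      // Lanes past the end of %buf are filled with the pad value 0.0,
      // matching the zero-initialized tail seen on the codegen path.
      %pad = arith.constant 0.0 : f64
      %v = vector.transfer_read %buf[%c0], %pad : memref<?xf64>, vector<8xf64>
      vector.print %v : vector<8xf64>
      return
    }
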
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
index aff5acf55154..3fcd89d02bcd 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
@@ -1,4 +1,10 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler="enable-runtime-library=false enable-buffer-initialization=true" | \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
@@ -37,28 +43,28 @@ module {
   //
   func.func @dumpf64(%arg0: memref<?xf64>) {
     %c = arith.constant 0 : index
-    %d = arith.constant -1.0 : f64
+    %d = arith.constant 0.0 : f64
     %0 = vector.transfer_read %arg0[%c], %d: memref<?xf64>, vector<8xf64>
     vector.print %0 : vector<8xf64>
     return
   }
   func.func @dumpi08(%arg0: memref<?xi8>) {
     %c = arith.constant 0 : index
-    %d = arith.constant -1 : i8
+    %d = arith.constant 0 : i8
     %0 = vector.transfer_read %arg0[%c], %d: memref<?xi8>, vector<8xi8>
     vector.print %0 : vector<8xi8>
     return
   }
   func.func @dumpi32(%arg0: memref<?xi32>) {
     %c = arith.constant 0 : index
-    %d = arith.constant -1 : i32
+    %d = arith.constant 0 : i32
     %0 = vector.transfer_read %arg0[%c], %d: memref<?xi32>, vector<8xi32>
     vector.print %0 : vector<8xi32>
     return
   }
   func.func @dumpi64(%arg0: memref<?xi64>) {
     %c = arith.constant 0 : index
-    %d = arith.constant -1 : i64
+    %d = arith.constant 0 : i64
     %0 = vector.transfer_read %arg0[%c], %d: memref<?xi64>, vector<8xi64>
     vector.print %0 : vector<8xi64>
     return
@@ -84,12 +90,12 @@ module {
     //
     // All proper row-/column-wise?
     //
-    // CHECK:      ( 1, 2, 3, 4, 5, 6, 7, -1 )
-    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, -1 )
-    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, -1 )
-    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, -1 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, -1 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, -1 )
+    // CHECK:      ( 1, 2, 3, 4, 5, 6, 7, 0 )
+    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, 0 )
+    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, 0 )
+    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, 0 )
+    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 0 )
+    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 0 )
     //
     %m1 = sparse_tensor.values %1 : tensor<32x64xf64, #DCSR> to memref<?xf64>
     %m2 = sparse_tensor.values %2 : tensor<32x64xf64, #DCSC> to memref<?xf64>
@@ -107,12 +113,12 @@ module {
     //
     // Sanity check on indices.
     //
-    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, -1 )
-    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, -1 )
-    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, -1 )
-    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, -1 )
-    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, -1 )
-    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, -1 )
+    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, 0 )
+    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, 0 )
+    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, 0 )
+    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, 0 )
+    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, 0 )
+    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, 0 )
     //
     %i1 = sparse_tensor.indices %1 { dimension = 1 : index } : tensor<32x64xf64, #DCSR> to memref<?xi8>
     %i2 = sparse_tensor.indices %2 { dimension = 1 : index } : tensor<32x64xf64, #DCSC> to memref<?xi64>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
index 497114ee906d..92860fa67661 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
@@ -1,4 +1,10 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
index a890414adacb..fda6db68ecb9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
@@ -1,4 +1,10 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler="enable-runtime-library=false enable-buffer-initialization=true"| \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
@@ -36,7 +42,7 @@ module {
   // Driver method to call and verify kernel.
   func.func @entry() {
     %c0 = arith.constant 0 : index
-    %f1 = arith.constant -1.0 : f32
+    %f0 = arith.constant 0.0 : f32
 
     // Setup very sparse matrices.
     %ta = arith.constant sparse<
@@ -58,10 +64,10 @@ module {
     //
     // Verify results. Only two entries stored in result!
     //
-    // CHECK: ( 14, 20, -1, -1 )
+    // CHECK: ( 14, 20, 0, 0 )
     //
     %val = sparse_tensor.values %0 : tensor<32x16xf32, #DCSR> to memref<?xf32>
-    %vv = vector.transfer_read %val[%c0], %f1: memref<?xf32>, vector<4xf32>
+    %vv = vector.transfer_read %val[%c0], %f0: memref<?xf32>, vector<4xf32>
     vector.print %vv : vector<4xf32>
 
     // Release the resources.

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
index df04387dde84..7a95831d68b0 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
@@ -1,4 +1,10 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler="enable-runtime-library=false enable-buffer-initialization=true"| \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
@@ -46,7 +52,7 @@ module {
   // Driver method to call and verify tensor kernel.
   func.func @entry() {
     %c0 = arith.constant 0 : index
-    %i0 = arith.constant -1 : i32
+    %i0 = arith.constant 0 : i32
 
     // Setup very sparse 3-d tensors.
     %t1 = arith.constant sparse<
@@ -68,7 +74,7 @@ module {
     //
     // Verify results. Only two entries stored in result. Correct structure.
     //
-    // CHECK: ( 7, 69, -1, -1 )
+    // CHECK: ( 7, 69, 0, 0 )
     // CHECK-NEXT: ( ( 0, 0, 0 ), ( 0, 7, 0 ), ( 0, 0, 69 ) )
     //
     %val = sparse_tensor.values %0

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
index 05b7c68ef951..e6181db753a9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
@@ -1,4 +1,10 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=true | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \

