[Mlir-commits] [mlir] a8166d8 - [mlir][sparse] move sparse2sparse conversion to own test file
Aart Bik
llvmlistbot at llvm.org
Fri Aug 26 13:17:37 PDT 2022
Author: Aart Bik
Date: 2022-08-26T13:17:24-07:00
New Revision: a8166d8801258fe793c81dd5d6c5f11e67fea280
URL: https://github.com/llvm/llvm-project/commit/a8166d8801258fe793c81dd5d6c5f11e67fea280
DIFF: https://github.com/llvm/llvm-project/commit/a8166d8801258fe793c81dd5d6c5f11e67fea280.diff
LOG: [mlir][sparse] move sparse2sparse conversion to own test file
Rationale:
We were running *all* conversion tests twice, just to check the
difference in one individual test in that file. By splitting that test
out, we get a much more focused testing setup.
Reviewed By: bixia
Differential Revision: https://reviews.llvm.org/D132757
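For reference, the split-out file can be exercised on its own after this
change; a minimal sketch, assuming a standard CMake build directory named
`build` (llvm-lit translates the source-tree path into the build tree):

  # Run only the new sparse2sparse conversion test through lit.
  build/bin/llvm-lit -v mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir

  # Or invoke the pass pipeline directly, mirroring the RUN lines in the new file.
  build/bin/mlir-opt mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir \
    --sparse-tensor-conversion="s2s-strategy=1" --canonicalize --cse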
Added:
mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir
Modified:
mlir/test/Dialect/SparseTensor/conversion.mlir
Removed:
################################################################################
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
index 80cd2f0cec639..336d8158eb2af 100644
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -1,10 +1,4 @@
-// First use with `kViaCOO` for sparse2sparse conversion (the old way).
-// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=1" \
-// RUN: --canonicalize --cse | FileCheck %s
-//
-// Now again with `kAuto` (the new default).
-// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=0" \
-// RUN: --canonicalize --cse | FileCheck %s -check-prefix=CHECKAUTO
+// RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s
#SparseVector = #sparse_tensor.encoding<{
dimLevelType = ["compressed"]
@@ -233,29 +227,15 @@ func.func @sparse_convert_complex(%arg0: tensor<100xcomplex<f64>>) -> tensor<100
// CHECK-LABEL: func @sparse_convert_1d_ss(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
-// CHECK-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
-// CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
+// CHECK-DAG: %[[SparseToSparse:.*]] = arith.constant 3 : i32
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
-// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
-// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
-// CHECK: call @delSparseTensorCOOF32(%[[C]])
+// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[SparseToSparse]], %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-// CHECKAUTO-LABEL: func @sparse_convert_1d_ss(
-// CHECKAUTO-SAME: %[[A:.*]]: !llvm.ptr<i8>)
-// CHECKAUTO-DAG: %[[SparseToSparse:.*]] = arith.constant 3 : i32
-// CHECKAUTO-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-// CHECKAUTO-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
-// CHECKAUTO-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
-// CHECKAUTO-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-// CHECKAUTO-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
-// CHECKAUTO-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
-// CHECKAUTO: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[SparseToSparse]], %[[A]])
-// CHECKAUTO: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
%0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
return %0 : tensor<?xf32, #SparseVector32>
diff --git a/mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir
new file mode 100644
index 0000000000000..488f19cf495f5
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir
@@ -0,0 +1,49 @@
+// First use with `kViaCOO` for sparse2sparse conversion (the old way).
+// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=1" \
+// RUN: --canonicalize --cse | FileCheck %s -check-prefix=CHECK-COO
+//
+// Now again with `kAuto` (the new default).
+// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=0" \
+// RUN: --canonicalize --cse | FileCheck %s -check-prefix=CHECK-AUTO
+
+#SparseVector64 = #sparse_tensor.encoding<{
+ dimLevelType = ["compressed"],
+ pointerBitWidth = 64,
+ indexBitWidth = 64
+}>
+
+#SparseVector32 = #sparse_tensor.encoding<{
+ dimLevelType = ["compressed"],
+ pointerBitWidth = 32,
+ indexBitWidth = 32
+}>
+
+// CHECK-COO-LABEL: func @sparse_convert(
+// CHECK-COO-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK-COO-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
+// CHECK-COO-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
+// CHECK-COO-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
+// CHECK-COO-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-COO-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-COO-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
+// CHECK-COO-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+// CHECK-COO-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
+// CHECK-COO: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
+// CHECK-COO: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
+// CHECK-COO: call @delSparseTensorCOOF32(%[[C]])
+// CHECK-COO: return %[[T]] : !llvm.ptr<i8>
+// CHECK-AUTO-LABEL: func @sparse_convert(
+// CHECK-AUTO-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK-AUTO-DAG: %[[SparseToSparse:.*]] = arith.constant 3 : i32
+// CHECK-AUTO-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
+// CHECK-AUTO-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-AUTO-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-AUTO-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
+// CHECK-AUTO-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+// CHECK-AUTO-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
+// CHECK-AUTO: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[SparseToSparse]], %[[A]])
+// CHECK-AUTO: return %[[T]] : !llvm.ptr<i8>
+func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
+ %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
+ return %0 : tensor<?xf32, #SparseVector32>
+}