[Mlir-commits] [mlir] 51ebecf - [mlir][sparse] Changed sparsity properties to use _ instead of -
Yinying Li
llvmlistbot@llvm.org
Wed Aug 23 10:00:54 PDT 2023
Author: Yinying Li
Date: 2023-08-23T17:00:27Z
New Revision: 51ebecf3095b625de4dbb6bbc2333856bb56bfd4
URL: https://github.com/llvm/llvm-project/commit/51ebecf3095b625de4dbb6bbc2333856bb56bfd4
DIFF: https://github.com/llvm/llvm-project/commit/51ebecf3095b625de4dbb6bbc2333856bb56bfd4.diff
LOG: [mlir][sparse] Changed sparsity properties to use _ instead of -
Example: compressed-no -> compressed_no
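In context, a sorted COO encoding that previously read

    #SortedCOO = #sparse_tensor.encoding<{
      lvlTypes = [ "compressed-nu", "singleton" ]
    }>

is now spelled

    #SortedCOO = #sparse_tensor.encoding<{
      lvlTypes = [ "compressed_nu", "singleton" ]
    }>

(This mirrors the test updates in the diff below.)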
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D158567
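The complete set of renamed level-type spellings (see the Enums.h change below):

    compressed-nu       -> compressed_nu
    compressed-no       -> compressed_no
    compressed-nu-no    -> compressed_nu_no
    singleton-nu        -> singleton_nu
    singleton-no        -> singleton_no
    singleton-nu-no     -> singleton_nu_no
    compressed-hi       -> compressed_hi
    compressed-hi-nu    -> compressed_hi_nu
    compressed-hi-no    -> compressed_hi_no
    compressed-hi-nu-no -> compressed_hi_nu_no

Plain "dense", "compressed", "singleton", and "compressed24" are unaffected.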
Added:
Modified:
mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
mlir/lib/Bindings/Python/DialectSparseTensor.cpp
mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
mlir/test/Dialect/SparseTensor/codegen.mlir
mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
mlir/test/Dialect/SparseTensor/invalid.mlir
mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
mlir/test/Dialect/SparseTensor/roundtrip.mlir
mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
mlir/test/Dialect/SparseTensor/sorted_coo.mlir
mlir/test/Dialect/SparseTensor/sparse_2d.mlir
mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
mlir/test/Dialect/SparseTensor/sparse_pack.mlir
mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir
mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index 72eefeaa9dc985..b51d272a790f4a 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -207,27 +207,27 @@ constexpr const char *toMLIRString(DimLevelType dlt) {
case DimLevelType::Compressed:
return "compressed";
case DimLevelType::CompressedNu:
- return "compressed-nu";
+ return "compressed_nu";
case DimLevelType::CompressedNo:
- return "compressed-no";
+ return "compressed_no";
case DimLevelType::CompressedNuNo:
- return "compressed-nu-no";
+ return "compressed_nu_no";
case DimLevelType::Singleton:
return "singleton";
case DimLevelType::SingletonNu:
- return "singleton-nu";
+ return "singleton_nu";
case DimLevelType::SingletonNo:
- return "singleton-no";
+ return "singleton_no";
case DimLevelType::SingletonNuNo:
- return "singleton-nu-no";
+ return "singleton_nu_no";
case DimLevelType::CompressedWithHi:
- return "compressed-hi";
+ return "compressed_hi";
case DimLevelType::CompressedWithHiNu:
- return "compressed-hi-nu";
+ return "compressed_hi_nu";
case DimLevelType::CompressedWithHiNo:
- return "compressed-hi-no";
+ return "compressed_hi_no";
case DimLevelType::CompressedWithHiNuNo:
- return "compressed-hi-nu-no";
+ return "compressed_hi_nu_no";
case DimLevelType::TwoOutOfFour:
return "compressed24";
}
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index 4e94fb6134fb46..e2f3df005b70d6 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -200,7 +200,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
// Sorted Coordinate Scheme.
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
... tensor<?x?xf64, #SortedCOO> ...
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index 33228476d37d52..ff61a0d035a2f1 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -1205,7 +1205,7 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage(
uint64_t trailCOOLen = 0, parentSz = 1, bufIdx = 0;
for (uint64_t l = 0; l < lvlRank; l++) {
if (!isUniqueLvl(l) && isCompressedLvl(l)) {
- // A `compressed-nu` level marks the start of trailing COO start level.
+ // A `compressed_nu` level marks the start of trailing COO start level.
// Since the coordinate buffer used for trailing COO are passed in as AoS
// scheme, and SparseTensorStorage uses a SoA scheme, we can not simply
// copy the value from the provided buffers.
@@ -1213,7 +1213,7 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage(
break;
}
assert(!isSingletonLvl(l) &&
- "Singleton level not following a compressed-nu level");
+ "Singleton level not following a compressed_nu level");
if (isCompressedLvl(l)) {
P *posPtr = reinterpret_cast<P *>(lvlBufs[bufIdx++]);
C *crdPtr = reinterpret_cast<C *>(lvlBufs[bufIdx++]);
diff --git a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
index d030883412640e..70805005f8afa8 100644
--- a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
+++ b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
@@ -21,19 +21,19 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
.value("dense", MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE)
.value("compressed24", MLIR_SPARSE_TENSOR_DIM_LEVEL_TWO_OUT_OF_FOUR)
.value("compressed", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED)
- .value("compressed-nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU)
- .value("compressed-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO)
- .value("compressed-nu-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO)
+ .value("compressed_nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU)
+ .value("compressed_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO)
+ .value("compressed_nu_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO)
.value("singleton", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON)
- .value("singleton-nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU)
- .value("singleton-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO)
- .value("singleton-nu-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO)
- .value("compressed-hi", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI)
- .value("compressed-hi-nu",
+ .value("singleton_nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU)
+ .value("singleton_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO)
+ .value("singleton_nu_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO)
+ .value("compressed_hi", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI)
+ .value("compressed_hi_nu",
MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU)
- .value("compressed-hi-no",
+ .value("compressed_hi_no",
MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NO)
- .value("compressed-hi-nu-no",
+ .value("compressed_hi_nu_no",
MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU_NO);
mlir_attribute_subclass(m, "EncodingAttr",
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 344d1c03b83a98..e71d2a8dd623a6 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -824,7 +824,7 @@ Level mlir::sparse_tensor::toStoredDim(RankedTensorType type, Dimension d) {
//===----------------------------------------------------------------------===//
/// We normalized sparse tensor encoding attribute by always using
-/// ordered/unique DLT such that "compressed-nu-no" and "compressed-nu" (as well
+/// ordered/unique DLT such that "compressed_nu_no" and "compressed_nu" (as well
/// as other variants) lead to the same storage specifier type, and stripping
/// irrelevant fields that do not alter the sparse tensor memory layout.
static SparseTensorEncodingAttr
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 2acb359f1d0fb4..325cdfbaa4898f 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1183,7 +1183,7 @@ class SparseNumberOfEntriesConverter
ConversionPatternRewriter &rewriter) const override {
// Query memSizes for the actually stored values.
// FIXME: the nse value computed in this way might be wrong when there is
- // any "compressed-hi" level.
+ // any "compressed_hi" level.
rewriter.replaceOp(
op, genValMemSize(rewriter, op.getLoc(), adaptor.getTensor()));
return success();
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
index 560a535f120342..4f25322df2c90d 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
@@ -2,7 +2,7 @@
// RUN: --sparsification="enable-gpu-libgen" | FileCheck %s
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
module {
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index f83d89961e8286..60544638b4e4f9 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -27,7 +27,7 @@
}>
#UCSR = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-no" ]
+ lvlTypes = [ "dense", "compressed_no" ]
}>
#CSC = #sparse_tensor.encoding<{
@@ -47,16 +47,16 @@
}>
#Coo = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CooPNo = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-no" ],
+ lvlTypes = [ "compressed_nu", "singleton_no" ],
dimToLvl = affine_map<(i, j) -> (j, i)>
}>
#ccoo = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed", "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed", "compressed_nu", "singleton" ]
}>
// CHECK-LABEL: func @sparse_nop(
@@ -523,7 +523,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
return %1 : tensor<8x8xf64, #CSR>
}
-// CHECK-LABEL: func.func private @"_insert_dense_compressed-no_8_8_f64_0_0"(
+// CHECK-LABEL: func.func private @_insert_dense_compressed_no_8_8_f64_0_0(
// CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
// CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
// CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -549,7 +549,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
// CHECK: %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
// CHECK: %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref<?xindex>
// CHECK: %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
-// CHECK: %[[A21:.*]]:4 = func.call @"_insert_dense_compressed-no_8_8_f64_0_0"(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
+// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_no_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
// CHECK: memref.store %[[A10]], %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
// CHECK: memref.store %[[A9]], %[[A5]]{{\[}}%[[A19]]] : memref<?xi1>
// CHECK: scf.yield %[[A21]]#0, %[[A21]]#1, %[[A21]]#2, %[[A21]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
@@ -627,7 +627,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
return %1 : tensor<128xf64, #SparseVector>
}
-// CHECK-LABEL: func.func private @"_insert_compressed-nu_singleton_5_6_f64_0_0"(
+// CHECK-LABEL: func.func private @_insert_compressed_nu_singleton_5_6_f64_0_0(
// CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
// CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
// CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -643,7 +643,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
// CHECK-SAME: %[[A3:.*3]]: !sparse_tensor.storage_specifier
// CHECK-SAME: %[[A4:.*4]]: index,
// CHECK-SAME: %[[A5:.*5]]: f64)
-// CHECK: %[[R:.*]]:4 = call @"_insert_compressed-nu_singleton_5_6_f64_0_0"(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
+// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nu_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
// CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
%0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>
diff --git a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
index bdd7a3da4c641e..e1a901db5459f5 100644
--- a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s
#CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"]}>
-#COO = #sparse_tensor.encoding<{ lvlTypes = ["compressed-nu", "singleton"]}>
+#COO = #sparse_tensor.encoding<{ lvlTypes = ["compressed_nu", "singleton"]}>
// CHECK-LABEL: func.func @sparse_alloc_copy_CSR(
// CHECK-SAME: %[[VAL_0:.*0]]: memref<?xindex>,
diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
index fd612d5f597d53..a0435bd9b0edcc 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -26,11 +26,11 @@
}>
#SortedCOO2D = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
}>
#SortedCOO3D = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ]
}>
@@ -40,7 +40,7 @@
}>
#COOSlice = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (2, 2, 1), (12, 13, 1) ]
}>
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index 1510c3d65b57a5..4849a1f25da6c3 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -32,7 +32,7 @@ func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coord
// -----
-#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"], posWidth=32, crdWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}>
func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>)
-> tensor<100x2xf64, #SparseVector> {
@@ -68,7 +68,7 @@ func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>, %values: ten
// -----
-#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"], posWidth=32, crdWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}>
func.func @invalid_unpack_type(%sp: tensor<100x2xf64, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>) {
// expected-error@+1 {{input/output trailing COO level-ranks don't match}}
@@ -270,7 +270,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>)
// -----
-#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"]}>
+#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"]}>
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> index {
// expected-error@+1 {{requested position memory size on a singleton level}}
diff --git a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
index feda768ed29f7a..de63043ea5cf46 100644
--- a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
+++ b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
@@ -5,7 +5,7 @@
}>
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#DCSR = #sparse_tensor.encoding<{
@@ -13,7 +13,7 @@
}>
#Slice = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (?, 1, 1), (?, 3, 1) ]
}>
diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
index 0bdeeeeece870b..c529578dc91688 100644
--- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
@@ -11,12 +11,12 @@
}>
#COO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
// CHECK-LABEL: func.func @sparse_new(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> {
-// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>
+// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
// CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]]
// CHECK: bufferization.dealloc_tensor %[[COO]]
// CHECK: return %[[R]]
@@ -27,7 +27,7 @@ func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSR> {
// CHECK-LABEL: func.func @sparse_new_csc(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> {
-// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>
+// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>
// CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]]
// CHECK: bufferization.dealloc_tensor %[[COO]]
// CHECK: return %[[R]]
@@ -37,8 +37,8 @@ func.func @sparse_new_csc(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSC> {
}
// CHECK-LABEL: func.func @sparse_new_coo(
-// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> {
-// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> {
+// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
// CHECK: return %[[COO]]
func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #COO> {
%0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #COO>
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index ace180369da7db..bd4baf57d8ba7c 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -103,7 +103,7 @@ func.func @sparse_positions(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xi
// -----
-#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"]}>
+#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"]}>
// CHECK-LABEL: func @sparse_indices_buffer(
// CHECK-SAME: %[[A:.*]]: tensor<?x?xf64, #{{.*}}>)
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index 6c17caad03dbf2..a3cb63e50015af 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -55,31 +55,31 @@ func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)
// -----
#COO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu-no", "singleton-no" ]
+ lvlTypes = [ "compressed_nu_no", "singleton_no" ]
}>
// CHECK-LABEL: func private @sparse_coo(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu-no", "singleton-no" ] }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu_no", "singleton_no" ] }>>)
func.func private @sparse_coo(tensor<?x?xf32, #COO>)
// -----
#BCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ]
+ lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ]
}>
// CHECK-LABEL: func private @sparse_bcoo(
-// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ] }>>)
func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)
// -----
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
// CHECK-LABEL: func private @sparse_sorted_coo(
-// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>)
func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
// -----
diff --git a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir
index 01189328eeb611..e4bc55fcdb0629 100644
--- a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir
+++ b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -sparsification --canonicalize | FileCheck %s
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#trait_scale = {
@@ -37,14 +37,14 @@
//
// CHECK-LABEL: func.func @sparse_scale(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> {
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> {
// CHECK-DAG: %[[VAL_1:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 2.000000e+00 : f32
-// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xf32>
+// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xf32>
// CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_10:.*]] = scf.while (%[[VAL_11:.*]] = %[[VAL_8]]) : (index) -> index {
@@ -75,8 +75,8 @@
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: scf.yield %[[VAL_28:.*]] : index
// CHECK: } attributes {"Emitted from" = "linalg.generic"}
-// CHECK: %[[VAL_29:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>
-// CHECK: return %[[VAL_29]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>
+// CHECK: %[[VAL_29:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
+// CHECK: return %[[VAL_29]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
// CHECK: }
func.func @sparse_scale(%argx: tensor<?x?xf32, #SortedCOO>) -> tensor<?x?xf32, #SortedCOO> {
%c = arith.constant 2.0 : f32
@@ -90,16 +90,16 @@ func.func @sparse_scale(%argx: tensor<?x?xf32, #SortedCOO>) -> tensor<?x?xf32, #
}
// CHECK-LABEL: func.func @matvec(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<64xf64>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
+// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xf64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -155,21 +155,21 @@ func.func @matvec(%arga: tensor<32x64xf64, #SortedCOO>,
}
// CHECK-LABEL: func.func @mateltmul(
-// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>,
-// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>,
+// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>,
+// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x64xf64>) -> tensor<32x64xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f64
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
-// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xf64>
+// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xf64>
// CHECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x64xf64>
// CHECK: linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_15]] : memref<32x64xf64>)
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
index 14187063e13c23..10d146922eb8e1 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -1050,7 +1050,7 @@ func.func @cmp_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
}
#BatchedVector = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-hi" ],
+ lvlTypes = [ "dense", "compressed_hi" ],
}>
// CHECK-LABEL: func.func @sub_ss_batched(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<2x3xf64, #{{.*}}>>,
diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
index 339c94d0f78e1d..baa30c9457d2dc 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
@@ -141,7 +141,7 @@ func.func @foreach_print_slice(%A: tensor<4x4xf64, #CSR_SLICE>) {
}
#BCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ],
+ lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ],
}>
// CHECK-LABEL: func.func @foreach_bcoo(
diff --git a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir
index 0377ecc3090a17..45906f49835670 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --canonicalize --post-sparsification-rewrite="enable-runtime-library=false" --sparse-tensor-codegen -cse --canonicalize | FileCheck %s
#COO = #sparse_tensor.encoding<{
- lvlTypes = ["compressed-nu", "singleton"],
+ lvlTypes = ["compressed_nu", "singleton"],
crdWidth=32
}>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir
index 8e022a36a5178c..ee1af7efadce4d 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir
@@ -1,12 +1,12 @@
// RUN: mlir-opt %s --linalg-generalize-named-ops --sparsification --cse --canonicalize | FileCheck %s
-#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
-#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
// CHECK-LABEL: func.func @sparse_reshape_fused(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<5x6xf32>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<6x2x3xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>>) -> tensor<?x?x?xf32> {
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<6x2x3xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>>) -> tensor<?x?x?xf32> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 5 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3 : index
diff --git a/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir b/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir
index 4bb66642ffd610..636a41e64b07a8 100644
--- a/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir
+++ b/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir
@@ -15,7 +15,7 @@
}
#VEC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 32, crdWidth = 32 }>
-#COO = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
#CCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], posWidth = 32, crdWidth = 32 }>
//
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
index e8d9cbc8629468..a1c66b24a1a487 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
@@ -31,8 +31,8 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %}
-#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
-#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
module {
func.func private @printMemref3dF32(%ptr : tensor<?x?x?xf32>) attributes { llvm.emit_c_interface }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
index cd0cb36acb753a..3a2ae645fae8bd 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
@@ -44,11 +44,11 @@
}>
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#SortedCOOPerm = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimToLvl = affine_map<(i,j) -> (j,i)>
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
index b839f6206a043d..2ba3e57940befc 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
@@ -29,7 +29,7 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
#Tensor1 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ]
}>
#Tensor2 = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
index 93d96351782902..57b243f0a44e61 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
@@ -31,7 +31,7 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
index 13453cf8b4de19..0280a51e614d85 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -38,16 +38,16 @@
}>
#COO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#COO_SLICE = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
#COO_SLICE_DYN = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (?, ?, ?), (?, ?, ?) ]
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
index 341291dc974762..c933b157053b61 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
@@ -32,7 +32,7 @@
}>
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
index d54e0beec25c92..6c39796ac66488 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
@@ -36,11 +36,11 @@
}>
#CCoo = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed", "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed", "compressed_nu", "singleton" ]
}>
#DCoo = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-nu", "singleton" ]
+ lvlTypes = [ "dense", "compressed_nu", "singleton" ]
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
index 702961bb42acf8..7fe715d87a26ff 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
@@ -41,7 +41,7 @@
}>
#COO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR_SLICE_1 = #sparse_tensor.encoding<{
@@ -55,12 +55,12 @@
}>
#COO_SLICE_1 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (0, 4, 2), (0, 4, 1) ]
}>
#COO_SLICE_2 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (0, 4, 2), (1, 4, 1) ]
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
index 5e861fe38358ae..66e1632694cb4b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -27,11 +27,11 @@
// TODO: support sparse_tensor.unpack on libgen path.
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#SortedCOOI32 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
posWidth = 32,
crdWidth = 32
}>
@@ -43,7 +43,7 @@
}>
#BCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ]
+ lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ]
}>
module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
index 99d2a46ef4ad6c..10275c836739ca 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
@@ -27,11 +27,11 @@
// after sparse_tensor.unpack is supported on libgen path.
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#SortedCOOI32 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
posWidth = 32,
crdWidth = 32
}>
@@ -42,9 +42,9 @@
crdWidth = 32
}>
-// TODO: "compressed-hi" is not supported by libgen path.
+// TODO: "compressed_hi" is not supported by libgen path.
// #BCOO = #sparse_tensor.encoding<{
-// lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ]
+// lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ]
//}>
module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
index 36f66e88037cfa..06209829918c4d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
@@ -35,20 +35,20 @@
!Filename = !llvm.ptr<i8>
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#SortedCOOPermuted = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimToLvl = affine_map<(i,j) -> (j,i)>
}>
#SortedCOO3D = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ]
}>
#SortedCOO3DPermuted = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ],
dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
index a3fb36ac273c22..5cb3e5e6cd45dc 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
@@ -32,7 +32,7 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
index 60c8e1ca3e43c7..1e51aae5f38926 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
@@ -25,7 +25,7 @@
// RUNNOT: %{compile} enable-runtime-library=false gpu-data-transfer-strategy=zero-copy" | %{run}
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
index bd8b11cb24d506..16a240838d7c4f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
@@ -25,7 +25,7 @@
//
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR = #sparse_tensor.encoding<{