[Mlir-commits] [mlir] [mlir][sparse] Migrate more tests to new syntax (PR #66309)

Yinying Li llvmlistbot at llvm.org
Wed Sep 13 18:26:15 PDT 2023


https://github.com/yinying-lisa-li created https://github.com/llvm/llvm-project/pull/66309:

This migrates the following encodings from the old `lvlTypes`/`dimToLvl` syntax to the new `map` syntax.

CSR:
`lvlTypes = [ "dense", "compressed" ]` becomes `map = (d0, d1) -> (d0 : dense, d1 : compressed)`

CSC:
`lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>` becomes `map = (d0, d1) -> (d1 : dense, d0 : compressed)`

>From 1120de5fa4b509c26090f59aaafb205c84fcf6c8 Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Thu, 14 Sep 2023 01:07:55 +0000
Subject: [PATCH] [mlir][sparse] Migrate more tests to new syntax

CSR:
lvlTypes = [ "dense", "compressed" ] to map = (d0, d1) -> (d0 : dense, d1 : compressed)

CSC:
lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> to  map = (d0, d1) -> (d1 : dense, d0 : compressed)
---
 mlir/test/Dialect/Bufferization/ops.mlir      |  2 +-
 .../Dialect/Linalg/drop-unit-extent-dims.mlir |  2 +-
 .../Dialect/SparseTensor/GPU/gpu_combi.mlir   |  2 +-
 .../Dialect/SparseTensor/GPU/gpu_matmul.mlir  |  2 +-
 .../SparseTensor/GPU/gpu_matmul_lib.mlir      |  2 +-
 .../Dialect/SparseTensor/GPU/gpu_matvec.mlir  |  2 +-
 .../GPU/gpu_sampled_matmul_lib.mlir           |  2 +-
 .../SparseTensor/GPU/gpu_spgemm_lib.mlir      |  2 +-
 mlir/test/Dialect/SparseTensor/codegen.mlir   |  5 ++--
 .../SparseTensor/codegen_sparse_alloc.mlir    |  2 +-
 .../SparseTensor/codegen_sparse_dealloc.mlir  |  5 ++--
 .../test/Dialect/SparseTensor/conversion.mlir |  5 ++--
 .../SparseTensor/convert_dense2sparse.mlir    |  5 ++--
 .../SparseTensor/convert_sparse2dense.mlir    |  2 +-
 mlir/test/Dialect/SparseTensor/invalid.mlir   | 24 +++++++++----------
 .../SparseTensor/invalid_encoding.mlir        |  8 +++----
 mlir/test/Dialect/SparseTensor/pack_copy.mlir |  3 +--
 .../SparseTensor/rewriting_for_codegen.mlir   |  5 ++--
 mlir/test/Dialect/SparseTensor/semi_ring.mlir |  2 +-
 mlir/test/Dialect/SparseTensor/sparse_2d.mlir |  2 +-
 .../Dialect/SparseTensor/sparse_affine.mlir   |  2 +-
 .../Dialect/SparseTensor/sparse_expand.mlir   |  5 ++--
 .../SparseTensor/sparse_extract_slice.mlir    |  2 +-
 .../Dialect/SparseTensor/sparse_lower.mlir    |  2 +-
 .../SparseTensor/sparse_lower_col.mlir        |  3 +--
 .../SparseTensor/sparse_lower_inplace.mlir    |  2 +-
 .../SparseTensor/sparse_matmul_codegen.mlir   |  3 +--
 .../test/Dialect/SparseTensor/sparse_out.mlir |  3 +--
 .../Dialect/SparseTensor/sparse_parallel.mlir |  2 +-
 .../SparseTensor/sparse_parallel_reduce.mlir  |  2 +-
 .../Dialect/SparseTensor/sparse_vector.mlir   |  4 ++--
 .../SparseTensor/sparse_vector_chain.mlir     |  2 +-
 .../SparseTensor/sparse_vector_concat.mlir    |  2 +-
 .../SparseTensor/specifier_to_llvm.mlir       |  2 +-
 mlir/test/Dialect/SparseTensor/spy_sddmm.mlir |  2 +-
 .../Dialect/SparseTensor/transform-ops.mlir   |  2 +-
 .../Dialect/SparseTensor/unused-tensor.mlir   |  2 +-
 .../SparseTensor/CPU/concatenate_dim_0.mlir   |  5 ++--
 .../CPU/concatenate_dim_0_permute.mlir        |  5 ++--
 .../SparseTensor/CPU/concatenate_dim_1.mlir   |  5 ++--
 .../CPU/concatenate_dim_1_permute.mlir        |  5 ++--
 .../SparseTensor/CPU/dense_output.mlir        |  3 +--
 .../SparseTensor/CPU/dual_sparse_conv_2d.mlir |  5 ++--
 .../CPU/sparse_codegen_foreach.mlir           |  2 +-
 .../SparseTensor/CPU/sparse_conv_2d.mlir      |  5 ++--
 .../CPU/sparse_conversion_ptr.mlir            |  3 +--
 .../SparseTensor/CPU/sparse_coo_test.mlir     |  2 +-
 .../SparseTensor/CPU/sparse_expand.mlir       |  3 +--
 .../CPU/sparse_foreach_slices.mlir            |  2 +-
 .../SparseTensor/CPU/sparse_insert_2d.mlir    |  2 +-
 .../SparseTensor/CPU/sparse_matmul.mlir       |  3 +--
 .../SparseTensor/CPU/sparse_matmul_slice.mlir |  2 +-
 .../SparseTensor/CPU/sparse_matvec.mlir       |  2 +-
 .../Dialect/SparseTensor/CPU/sparse_pack.mlir |  2 +-
 .../SparseTensor/CPU/sparse_pack_libgen.mlir  |  2 +-
 .../CPU/sparse_reduce_custom.mlir             |  5 ++--
 .../CPU/sparse_reduce_custom_prod.mlir        |  5 ++--
 .../SparseTensor/CPU/sparse_scale.mlir        |  2 +-
 .../SparseTensor/CPU/sparse_select.mlir       |  5 ++--
 .../Dialect/SparseTensor/CPU/sparse_spmm.mlir |  2 +-
 .../SparseTensor/CPU/sparse_storage.mlir      |  5 ++--
 .../GPU/CUDA/sparse-gemm-lib.mlir             |  2 +-
 .../GPU/CUDA/sparse-matmul-lib.mlir           |  2 +-
 .../GPU/CUDA/sparse-matvec-const.mlir         |  2 +-
 .../GPU/CUDA/sparse-matvec-lib.mlir           |  2 +-
 .../SparseTensor/GPU/CUDA/sparse-matvec.mlir  |  2 +-
 .../GPU/CUDA/sparse-sampled-matmul-lib.mlir   |  2 +-
 .../python/dialects/sparse_tensor/dialect.py  |  3 +--
 68 files changed, 99 insertions(+), 124 deletions(-)

diff --git a/mlir/test/Dialect/Bufferization/ops.mlir b/mlir/test/Dialect/Bufferization/ops.mlir
index 665f5697fdc5fdf..dc53e535bfe0d57 100644
--- a/mlir/test/Dialect/Bufferization/ops.mlir
+++ b/mlir/test/Dialect/Bufferization/ops.mlir
@@ -2,7 +2,7 @@
 // RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 // CHECK-LABEL: func @test_clone
diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
index 88659f8628ae70a..795e9ee5287173f 100644
--- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
+++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -854,7 +854,7 @@ func.func @input_stays_same(%arg0 : memref<?x1x?xf32, strided<[?, 1, 1]>>, %arg1
   iterator_types = ["parallel", "reduction"]
 }
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 func.func @sparse_case(%arg0: tensor<8x8xf32, #CSR>, %arg1: tensor<8xf32>) -> tensor<8xf32> {
     %0 = tensor.empty() : tensor<8xf32>
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_combi.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_combi.mlir
index 568487205ba3e34..0979884cbd502a5 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_combi.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_combi.mlir
@@ -3,7 +3,7 @@
 // RUN:             --sparsification="parallelization-strategy=dense-outer-loop" \
 // RUN:             --sparse-gpu-codegen | FileCheck %s
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 //
 // CHECK-LABEL: gpu.module @sparse_kernels
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul.mlir
index b0fa5615c6c1f28..84265398d60cd87 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul.mlir
@@ -3,7 +3,7 @@
 // RUN:             --sparsification="parallelization-strategy=dense-outer-loop" \
 // RUN:             --sparse-gpu-codegen | FileCheck %s
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 //
 // Compute matrix matrix C = AB
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir
index 125a67b78498a80..73161bdb135ca4a 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s --linalg-generalize-named-ops \
 // RUN:             --sparsification="enable-gpu-libgen" | FileCheck %s
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 //
 // Compute matrix matrix C = AB
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec.mlir
index b9d33f2e2b0694f..b56f3a90aa27c34 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec.mlir
@@ -3,7 +3,7 @@
 // RUN:             --sparsification="parallelization-strategy=dense-outer-loop" \
 // RUN:             --sparse-gpu-codegen | FileCheck %s
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 //
 // Compute matrix vector y = Ax
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir
index 71641f33f82bd24..3c8e4c14e0c6a26 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir
@@ -19,7 +19,7 @@
   iterator_types = ["parallel", "parallel"]
 }
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 // CHECK-LABEL:   func.func @sparse_sampled_dd(
 // CHECK-SAME:                                 %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>,
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_spgemm_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_spgemm_lib.mlir
index d880a9688077bdd..7b4c48dc34105d0 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_spgemm_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_spgemm_lib.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s --linalg-generalize-named-ops \
 // RUN:             --sparsification="enable-gpu-libgen" | FileCheck %s
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 // CHECK-LABEL: func.func @matmulCSR(
 // CHECK-SAME:      %[[VAL_0:.*0]]: tensor<8x8xf32, #{{.*}}>,
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index 5155e5ce6c45474..43d86a9f158f03c 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -21,7 +21,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
   crdWidth = 64,
   posWidth = 32
 }>
@@ -31,8 +31,7 @@
 }>
 
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i, j) -> (j, i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 #DCSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
index e1a901db5459f53..479642e5db4ed1e 100644
--- a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #COO = #sparse_tensor.encoding<{ lvlTypes = ["compressed_nu", "singleton"]}>
 
 // CHECK-LABEL:   func.func @sparse_alloc_copy_CSR(
diff --git a/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir b/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir
index 1aff486e49fb2e2..59e568dd5de6461 100644
--- a/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir
@@ -6,10 +6,9 @@
 // RUN:    --sparse-tensor-codegen=create-sparse-deallocs=true \
 // RUN:    --canonicalize --cse | FileCheck %s -check-prefix=CHECK-DEALLOC
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed),
 }>
 
 //
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
index ae9e312de7f2747..f8e30872a0756c7 100644
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -17,12 +17,11 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 #SparseTensor = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
index f2ac0c22e035ee4..4707b199222ad49 100644
--- a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
@@ -7,12 +7,11 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i, j) -> (j, i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 #SparseTensor = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir
index 7328ede697d96a9..363a63eb8ed1eca 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir
@@ -8,7 +8,7 @@
 }>
 
 #SparseMatrix = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #SparseTensor = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index 360dfcce2ef2bab..3091b0b8505d220 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -44,7 +44,7 @@ func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coord
 
 // -----
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], posWidth=32, crdWidth=32}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth=32, crdWidth=32}>
 
 func.func @invalid_pack_mis_position(%values: tensor<6xf64>, %coordinates: tensor<6xi32>)
                                      -> tensor<2x100xf64, #CSR> {
@@ -80,7 +80,7 @@ func.func @invalid_unpack_type(%sp: tensor<100x2xf64, #SparseVector>, %values: t
 
 // -----
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], posWidth=32, crdWidth=32}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth=32, crdWidth=32}>
 
 func.func @invalid_unpack_mis_position(%sp: tensor<2x100xf64, #CSR>, %values: tensor<6xf64>, %coordinates: tensor<6xi32>) {
   // expected-error at +1 {{inconsistent number of fields between input/output}}
@@ -297,7 +297,7 @@ func.func @sparse_unannotated_insert(%arg0: tensor<128xf64>, %arg1: index, %arg2
 
 // -----
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 func.func @sparse_wrong_arity_insert(%arg0: tensor<128x64xf64, #CSR>, %arg1: index, %arg2: f64) {
   // expected-error at +1 {{'sparse_tensor.insert' op incorrect number of coordinates}}
@@ -347,7 +347,7 @@ func.func @sparse_unannotated_compression(%arg0: memref<?xf64>,
 
 // -----
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 func.func @sparse_wrong_arity_compression(%arg0: memref<?xf64>,
                                           %arg1: memref<?xi1>,
@@ -381,7 +381,7 @@ func.func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tens
 
 // -----
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 func.func @sparse_convert_dim_mismatch(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
   // expected-error at +1 {{unexpected conversion mismatch in dimension 1}}
@@ -632,7 +632,7 @@ func.func @invalid_select_wrong_yield(%arg0: f64) -> f64 {
 
 // -----
 
-#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 func.func @invalid_concat_less_inputs(%arg: tensor<9x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
   // expected-error at +1 {{Need at least two tensors to concatenate.}}
   %0 = sparse_tensor.concatenate %arg {dimension = 1 : index}
@@ -642,7 +642,7 @@ func.func @invalid_concat_less_inputs(%arg: tensor<9x4xf64, #DC>) -> tensor<9x4x
 
 // -----
 
-#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 func.func @invalid_concat_dim(%arg0: tensor<2x4xf64, #DC>,
                               %arg1: tensor<3x4xf64, #DC>,
                               %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
@@ -657,7 +657,7 @@ func.func @invalid_concat_dim(%arg0: tensor<2x4xf64, #DC>,
 // -----
 
 #C = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
-#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #DCC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed", "compressed"]}>
 func.func @invalid_concat_rank_mismatch(%arg0: tensor<2xf64, #C>,
                                         %arg1: tensor<3x4xf64, #DC>,
@@ -672,7 +672,7 @@ func.func @invalid_concat_rank_mismatch(%arg0: tensor<2xf64, #C>,
 
 // -----
 
-#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 func.func @invalid_concat_size_mismatch_dyn(%arg0: tensor<?x4xf64, #DC>,
                                             %arg1: tensor<5x4xf64, #DC>,
                                             %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
@@ -686,7 +686,7 @@ func.func @invalid_concat_size_mismatch_dyn(%arg0: tensor<?x4xf64, #DC>,
 
 // -----
 
-#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 func.func @invalid_concat_size_mismatch(%arg0: tensor<3x4xf64, #DC>,
                                         %arg1: tensor<5x4xf64, #DC>,
                                         %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
@@ -700,7 +700,7 @@ func.func @invalid_concat_size_mismatch(%arg0: tensor<3x4xf64, #DC>,
 
 // -----
 
-#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 func.func @invalid_concat_size_mismatch(%arg0: tensor<2x4xf64, #DC>,
                                         %arg1: tensor<3x3xf64, #DC>,
                                         %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
@@ -844,7 +844,7 @@ func.func @sparse_sort_coo_y_too_small(%arg0: memref<60xindex>, %arg1: memref<10
 
 // -----
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 func.func @sparse_alloc_escapes(%arg0: index) -> tensor<10x?xf64, #CSR> {
   // expected-error at +1 {{sparse tensor allocation should not escape function}}
diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
index 7fa8e3f9d5a09eb..42eb4e0a46182e7 100644
--- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -7,13 +7,13 @@ func.func private @scalar(%arg0: tensor<f64, #a>) -> ()
 // -----
 
 // expected-error at +2 {{dimension-rank mismatch between encoding and tensor shape: 2 != 1}}
-#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#a = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 func.func private @tensor_dimlevel_size_mismatch(%arg0: tensor<8xi32, #a>) -> ()
 
 // -----
 
-// expected-error at +1 {{level-rank mismatch between dimToLvl and lvlTypes: 1 != 2}}
-#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], dimToLvl = affine_map<(i) -> (i)>}>
+// expected-error at +1 {{use of undeclared identifier}}
+#a = #sparse_tensor.encoding<{map = (d0) -> (d0 : dense, d1 : compressed)}>
 func.func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> ()
 
 // -----
@@ -34,7 +34,7 @@ func.func private @tensor_dimtolvl_mismatch(%arg0: tensor<8xi32, #a>) -> ()
 // -----
 
 // expected-error at +1 {{expected a permutation affine map for dimToLvl}}
-#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], dimToLvl = affine_map<(i,j) -> (i,i)>}>
+#a = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d0 : compressed)}>
 func.func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> ()
 
 // -----
diff --git a/mlir/test/Dialect/SparseTensor/pack_copy.mlir b/mlir/test/Dialect/SparseTensor/pack_copy.mlir
index 93008261d9f1ea8..aee7793671c903b 100644
--- a/mlir/test/Dialect/SparseTensor/pack_copy.mlir
+++ b/mlir/test/Dialect/SparseTensor/pack_copy.mlir
@@ -1,8 +1,7 @@
 // RUN: mlir-opt %s --sparsification-and-bufferization | FileCheck %s
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense","compressed"],
-  dimToLvl = affine_map<(i,j) -> (i,j)>,
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
   crdWidth = 32,
   posWidth = 32
 }>
diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
index c529578dc916884..0312758722bea82 100644
--- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
@@ -2,12 +2,11 @@
 // RUN: FileCheck %s
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i, j) -> (j, i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 #COO = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/semi_ring.mlir b/mlir/test/Dialect/SparseTensor/semi_ring.mlir
index 762ef5f678e43ae..c69efcae3b08ec2 100644
--- a/mlir/test/Dialect/SparseTensor/semi_ring.mlir
+++ b/mlir/test/Dialect/SparseTensor/semi_ring.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -sparsification | FileCheck %s
 
-#SM = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#SM = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 #trait = {
   indexing_maps = [
diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
index 10d146922eb8e1a..57e7459b302a471 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -2,7 +2,7 @@
 // RUN: mlir-opt %s -sparsification | FileCheck %s
 
 #Tdd = #sparse_tensor.encoding<{ lvlTypes = [ "dense",      "dense"      ] }>
-#Tds = #sparse_tensor.encoding<{ lvlTypes = [ "dense",      "compressed" ] }>
+#Tds = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 #Tsd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense"      ] }>
 #Tss = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>
 
diff --git a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir
index 3a58c5ea9c17b6e..856c4d8a2e7d0ab 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir
@@ -2,7 +2,7 @@
 // RUN: mlir-opt %s -sparsification | FileCheck %s
 
 #SpVec = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
-#CSR   = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR   = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 #Row   = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>
 #EncDenseVec = #sparse_tensor.encoding<{ map = (d0) -> (d0 : dense) }>
 
diff --git a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
index 98dfb4b88d6d34d..13b0cd43c775f2f 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
@@ -8,12 +8,11 @@
 // RUN:   FileCheck %s --check-prefix=CHECK-CONVERT
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [  "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [  "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 #DCSC = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
index efb920b7af13c4e..d14d3638a9c2641 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s --sparse-tensor-codegen --cse |  FileCheck %s
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #CSR_SLICE = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
index 51aa81bae79d17f..3b8b86010edd28c 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
@@ -8,7 +8,7 @@
 // RUN: --tensor-bufferize --finalizing-bufferize |  \
 // RUN: FileCheck %s --check-prefix=CHECK-LIR
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = [ "dense", "compressed" ]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 #trait_matvec = {
   indexing_maps = [
diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
index ecca5fa363bb43a..9453d9f7d82c32a 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
@@ -9,8 +9,7 @@
 // RUN: FileCheck %s --check-prefix=CHECK-LIR
 
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 #trait_matvec = {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
index 53cbd4ce88253c9..dc81acff5042398 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
@@ -8,7 +8,7 @@
 // RUN: --tensor-bufferize --finalizing-bufferize |  \
 // RUN: FileCheck %s --check-prefix=CHECK-LIR
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = [ "dense", "compressed" ]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 #trait_matvec = {
   indexing_maps = [
diff --git a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir
index 9db54f23f4bdcbc..b31ac3ef3a254ad 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir
@@ -5,8 +5,7 @@
 // RUN:  --canonicalize --cse | FileCheck %s
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (i,j)>
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 //
diff --git a/mlir/test/Dialect/SparseTensor/sparse_out.mlir b/mlir/test/Dialect/SparseTensor/sparse_out.mlir
index 496db65be6ec224..04eccc7fb6d1892 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_out.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_out.mlir
@@ -1,8 +1,7 @@
 // RUN: mlir-opt %s -sparsification | FileCheck %s
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (i,j)>
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
 }>
 
 #DCSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
index bbd2de7d4b5a019..4dabb92aa0f478d 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
@@ -18,7 +18,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #trait_dd = {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir
index 3f6bb8804edba18..09237b4c50f41ad 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir
@@ -2,7 +2,7 @@
 // RUN:   FileCheck %s
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #trait_matvec = {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
index a438f9442fe203f..48ba9119c4f44ea 100755
--- a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
@@ -309,7 +309,7 @@ func.func @reduction_d(%arga: tensor<1024xf32, #DenseVector>,
 // -----
 
 #SparseMatrix = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
   posWidth = 32,
   crdWidth = 32
 }>
@@ -448,7 +448,7 @@ func.func @mul_ds(%arga: tensor<512x1024xf32, #SparseMatrix>,
 
 // -----
 
-#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["dense","compressed"]}>
+#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 #trait_affine = {
   indexing_maps = [
diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
index 01342c5d1991ede..0339414f5f09c10 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -sparsification -cse -sparse-vectorization="vl=8" -cse | \
 // RUN:   FileCheck %s
 
-#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["dense","compressed"]}>
+#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 #trait = {
   indexing_maps = [
diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir
index b8521f21836f808..13aaf37dcb55b4e 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s --sparse-compiler="enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true"
 
 #MAT_D_C = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #MAT_C_C_P = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir b/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir
index 50cc135d02724bb..b647fe0cdeed00e 100644
--- a/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir
+++ b/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -sparse-storage-specifier-to-llvm --cse --canonicalize | FileCheck %s
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 // CHECK-LABEL:   func.func @sparse_metadata_init() -> !llvm.struct<(array<2 x i64>, array<3 x i64>)> {
 // CHECK:           %[[VAL_0:.*]] = arith.constant 0 : i64
diff --git a/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir b/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir
index 8bc405a4ccf522f..eaa15d7f83bc47f 100755
--- a/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir
+++ b/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir
@@ -5,7 +5,7 @@
 // in-place update of the sampling sparse matrix.
 //
 
-#SM = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#SM = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 #trait_sampled_dense_dense = {
   indexing_maps = [
diff --git a/mlir/test/Dialect/SparseTensor/transform-ops.mlir b/mlir/test/Dialect/SparseTensor/transform-ops.mlir
index 8032355641e8b35..4a9060b5c10cf00 100644
--- a/mlir/test/Dialect/SparseTensor/transform-ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/transform-ops.mlir
@@ -25,7 +25,7 @@ module attributes { transform.with_named_sequence } {
   }
 }
 
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 
 func.func @payload(%lhs: tensor<10x20xf16>,
                    %sp_lhs: tensor<10x20xf16, #CSR>,
diff --git a/mlir/test/Dialect/SparseTensor/unused-tensor.mlir b/mlir/test/Dialect/SparseTensor/unused-tensor.mlir
index 05da6c455135c2d..5f169dd989bdc48 100644
--- a/mlir/test/Dialect/SparseTensor/unused-tensor.mlir
+++ b/mlir/test/Dialect/SparseTensor/unused-tensor.mlir
@@ -7,7 +7,7 @@
 // compiler still generates the correct loop nest for this case.
 //
 
-#SM = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#SM = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 #trait = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
index 5e3908396e9cb04..17ce38edd908689 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
@@ -31,7 +31,7 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 #MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}>
-#MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#MAT_D_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}>
 #MAT_D_D = #sparse_tensor.encoding<{
   lvlTypes = ["dense", "dense"],
@@ -49,8 +49,7 @@
 }>
 
 #MAT_D_C_P = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir
index b7fa3d48d1c30f5..e3ec5241c9733b1 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir
@@ -31,7 +31,7 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 #MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}>
-#MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#MAT_D_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}>
 #MAT_D_D = #sparse_tensor.encoding<{
   lvlTypes = ["dense", "dense"],
@@ -49,8 +49,7 @@
 }>
 
 #MAT_D_C_P = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir
index e347104be2a595c..f9455dffa3ad167 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir
@@ -28,7 +28,7 @@
 // RUN: %{compile} | %{run} | FileCheck %s
 
 #MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}>
-#MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#MAT_D_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}>
 #MAT_D_D = #sparse_tensor.encoding<{
   lvlTypes = ["dense", "dense"],
@@ -46,8 +46,7 @@
 }>
 
 #MAT_D_C_P = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir
index d2b8622b47d5f23..9bb5f95e9c92995 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir
@@ -31,7 +31,7 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 #MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}>
-#MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#MAT_D_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}>
 #MAT_D_D = #sparse_tensor.encoding<{
   lvlTypes = ["dense", "dense"],
@@ -49,8 +49,7 @@
 }>
 
 #MAT_D_C_P = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index 7ee385263d314e6..140c237d3e79cbb 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -39,8 +39,7 @@
 }>
 
 #SparseMatrix = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (i,j)>
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
 }>
 
 #trait_assign = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
index 8b7e1c4bc7d9360..77f550d35237a01 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
@@ -31,11 +31,10 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #CDR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}>
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 // An example of a 2D convolution with a sparse filter.
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
index 3a2ae645fae8bdd..37cda138f675609 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
@@ -35,7 +35,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #DCSC = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
index 12308ec81dd464a..e00d74d108c71e6 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
@@ -31,11 +31,10 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 #CDR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}>
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 // An example of a 2D convolution with a sparse filter.
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
index 81cee3ea28f1475..e364125e3fc6767 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
@@ -44,8 +44,7 @@
 }>
 
 #CSC  = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>,
+  map = (d0, d1) -> (d1 : dense, d0 : compressed),
   posWidth = 16,
   crdWidth = 32
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
index 57b243f0a44e611..fdbe84b9d332cf0 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
@@ -35,7 +35,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #trait = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
index 81782b5629c683a..c5b62cea5a1dfa8 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
@@ -31,8 +31,7 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
index 0280a51e614d856..cc472fc2ed8480f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -24,7 +24,7 @@
 // TODO: support slices on lib path
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #CSR_SLICE = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
index c933b157053b619..a683c063e64a18e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
@@ -36,7 +36,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #DCSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
index 48c7055e6606fd3..35fcaeec1c616d9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
@@ -41,8 +41,7 @@
 // TODO: Investigate the output generated for SVE, see https://github.com/llvm/llvm-project/issues/60626
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (i,j)>
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #DCSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
index 7fe715d87a26ff4..2de3ca82dcdee74 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
@@ -32,7 +32,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #CSR_SLICE = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index 65b5e5610a55be0..19648b25fd7c15f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -43,7 +43,7 @@
 !Filename = !llvm.ptr<i8>
 
 #SparseMatrix = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
   posWidth = 8,
   crdWidth = 8
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
index d95efb507765403..171c4afb3ea700e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -37,7 +37,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
   posWidth = 32,
   crdWidth = 32
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
index 10275c836739ca8..9a48e7d466f923d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
@@ -37,7 +37,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
   posWidth = 32,
   crdWidth = 32
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
index 1a1351ac6c6a69f..fb685470134c09f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
@@ -33,10 +33,9 @@
 // Reduction in this file _are_ supported by the AArch64 SVE backend
 
 #SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 //
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
index af30fb1bf8a1da3..55111d1ee75c717 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
@@ -32,10 +32,9 @@
 // sparse_reducitons.mlir)
 
 #SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 //
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
index 5f527a98afbe092..5b47687bfa52be2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
@@ -30,7 +30,7 @@
 // Do the same run, but now with  VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 #trait_scale = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
index 11301c43163527a..70546f804fa1c3a 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
@@ -31,10 +31,9 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 #SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
-#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 //
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
index 59ea9a8114c53a8..e8a9ea6e2c5a775 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -34,7 +34,7 @@
 !Filename = !llvm.ptr<i8>
 
 #SparseMatrix = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #spmm = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
index 753d184f021bd21..fe8043bf1063047 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
@@ -39,7 +39,7 @@
 }>
 
 #CSR  = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #DCSR = #sparse_tensor.encoding<{
@@ -47,8 +47,7 @@
 }>
 
 #CSC = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : dense, d0 : compressed)
 }>
 
 #DCSC = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir
index a310e59d53038fd..8f67553592fbac0 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir
@@ -22,7 +22,7 @@
 // RUN: | FileCheck %s
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
   posWidth = 32,
   crdWidth = 32
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
index 1e51aae5f389260..4b321f32d04c0c5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
@@ -29,7 +29,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
   posWidth = 32,
   crdWidth = 32
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-const.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-const.mlir
index 2c09ae298e353a9..0658ff1ff41c0ed 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-const.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-const.mlir
@@ -9,7 +9,7 @@
 // RUN:   --e main --entry-point-result=void \
 // RUN: | FileCheck %s
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 module {
   // Compute matrix vector y = Ax
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
index 16a240838d7c4fd..947c7d9cbbc3cb7 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
@@ -29,7 +29,7 @@
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
+  map = (d0, d1) -> (d0 : dense, d1 : compressed),
   posWidth = 32,
   crdWidth = 32
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec.mlir
index c032201b781f5ee..4266e51658073f9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec.mlir
@@ -9,7 +9,7 @@
 // RUN:   --e main --entry-point-result=void \
 // RUN: | FileCheck %s
 
-#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
 
 module {
   // Compute matrix vector y = Ax
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir
index 934bd837420c1bf..dd4b0bbd8e59acb 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir
@@ -28,7 +28,7 @@
 !Filename = !llvm.ptr<i8>
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>
 
 #trait_sampled_dense_dense = {
diff --git a/mlir/test/python/dialects/sparse_tensor/dialect.py b/mlir/test/python/dialects/sparse_tensor/dialect.py
index bd5d9750eb57484..e1048edce184a51 100644
--- a/mlir/test/python/dialects/sparse_tensor/dialect.py
+++ b/mlir/test/python/dialects/sparse_tensor/dialect.py
@@ -56,8 +56,7 @@ def testEncodingAttr2D():
     with Context() as ctx:
         parsed = Attribute.parse(
             "#sparse_tensor.encoding<{"
-            '  lvlTypes = [ "dense", "compressed" ],'
-            "  dimToLvl = affine_map<(d0, d1) -> (d1, d0)>,"
+            "  map = (d0, d1) -> (d1 : dense, d0 : compressed),"
             "  posWidth = 8,"
             "  crdWidth = 32"
             "}>"
