[Mlir-commits] [mlir] [mlir][sparse] rename DimLevelType to LevelType (PR #73561)

Aart Bik llvmlistbot at llvm.org
Mon Nov 27 12:32:00 PST 2023


https://github.com/aartbik created https://github.com/llvm/llvm-project/pull/73561

The "Dim" prefix is a legacy left-over that no longer makes sense, since we have a very strict "Dimension" vs. "Level" definition for sparse tensor types and their storage.

From 75dfdcfd16ea95e16190f5a41b0b1df12cc63532 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Mon, 27 Nov 2023 10:36:55 -0800
Subject: [PATCH] [mlir][sparse] rename DimLevelType to LevelType

The "Dim" prefix is a legacy left-over that no longer makes
sense, since we have a very strict "Dimension" vs. "Level"
definition for sparse tensor types and their storage.
---
 mlir/include/mlir-c/Dialect/SparseTensor.h    |  36 +-
 .../mlir/Dialect/SparseTensor/IR/Enums.h      | 393 +++++++++---------
 .../SparseTensor/IR/SparseTensorAttrDefs.td   |  10 +-
 .../IR/SparseTensorStorageLayout.h            |   6 +-
 .../SparseTensor/IR/SparseTensorType.h        |   4 +-
 .../mlir/Dialect/SparseTensor/Utils/Merger.h  |  18 +-
 .../mlir/ExecutionEngine/SparseTensor/File.h  |   2 +-
 .../ExecutionEngine/SparseTensor/Storage.h    |  36 +-
 .../ExecutionEngine/SparseTensorRuntime.h     |   2 +-
 .../Bindings/Python/DialectSparseTensor.cpp   |  34 +-
 mlir/lib/CAPI/Dialect/SparseTensor.cpp        |  49 ++-
 .../SparseTensor/IR/Detail/DimLvlMap.cpp      |   2 +-
 .../SparseTensor/IR/Detail/DimLvlMap.h        |   8 +-
 .../IR/Detail/DimLvlMapParser.cpp             |   2 +-
 .../SparseTensor/IR/Detail/LvlTypeParser.cpp  |   4 +-
 .../SparseTensor/IR/Detail/LvlTypeParser.h    |   2 +-
 .../SparseTensor/IR/SparseTensorDialect.cpp   |  79 ++--
 .../SparseTensor/Transforms/CodegenEnv.h      |   4 +-
 .../SparseTensor/Transforms/CodegenUtils.h    |   4 +-
 .../SparseTensor/Transforms/LoopEmitter.cpp   |   8 +-
 .../SparseTensor/Transforms/LoopEmitter.h     |   2 +-
 .../Transforms/SparseTensorCodegen.cpp        |  16 +-
 .../Transforms/SparseTensorConversion.cpp     |   2 +-
 .../Transforms/SparseTensorDescriptor.cpp     |   2 +-
 .../Transforms/SparseTensorRewriting.cpp      |   5 +-
 .../Transforms/Sparsification.cpp             | 170 ++++----
 .../lib/Dialect/SparseTensor/Utils/Merger.cpp |   3 +-
 .../ExecutionEngine/SparseTensor/Storage.cpp  |   2 +-
 .../ExecutionEngine/SparseTensorRuntime.cpp   |   4 +-
 mlir/test/CAPI/sparse_tensor.c                |   4 +-
 .../CPU/sparse_conversion_element.mlir        |   2 +-
 .../CPU/sparse_conversion_sparse2sparse.mlir  |   4 +-
 .../Dialect/SparseTensor/python/test_SDDMM.py |  10 +-
 .../Dialect/SparseTensor/python/test_SpMM.py  |  10 +-
 .../SparseTensor/python/test_output.py        |  16 +-
 .../SparseTensor/python/test_stress.py        |   2 +-
 .../python/dialects/sparse_tensor/dialect.py  |   4 +-
 .../Dialect/SparseTensor/MergerTest.cpp       |  34 +-
 38 files changed, 493 insertions(+), 502 deletions(-)

diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h b/mlir/include/mlir-c/Dialect/SparseTensor.h
index 859a4f0dd9f52c8..41d024db04964ef 100644
--- a/mlir/include/mlir-c/Dialect/SparseTensor.h
+++ b/mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -22,24 +22,24 @@ MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(SparseTensor, sparse_tensor);
 /// Dimension level types (and properties) that define sparse tensors.
 /// See the documentation in SparseTensorAttrDefs.td for their meaning.
 ///
-/// These correspond to SparseTensorEncodingAttr::DimLevelType in the C++ API.
+/// These correspond to SparseTensorEncodingAttr::LevelType in the C++ API.
 /// If updating, keep them in sync and update the static_assert in the impl
 /// file.
-enum MlirSparseTensorDimLevelType {
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE = 4,                   // 0b00001_00
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED = 8,              // 0b00010_00
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU = 9,           // 0b00010_01
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO = 10,          // 0b00010_10
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO = 11,       // 0b00010_11
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON = 16,              // 0b00100_00
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU = 17,           // 0b00100_01
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO = 18,           // 0b00100_10
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO = 19,        // 0b00100_11
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED = 32,       // 0b01000_00
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU = 33,    // 0b01000_01
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NO = 34,    // 0b01000_10
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU_NO = 35, // 0b01000_11
-  MLIR_SPARSE_TENSOR_DIM_LEVEL_TWO_OUT_OF_FOUR = 64,        // 0b10000_00
+enum MlirSparseTensorLevelType {
+  MLIR_SPARSE_TENSOR_LEVEL_DENSE = 4,                   // 0b00001_00
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED = 8,              // 0b00010_00
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU = 9,           // 0b00010_01
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO = 10,          // 0b00010_10
+  MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO = 11,       // 0b00010_11
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON = 16,              // 0b00100_00
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU = 17,           // 0b00100_01
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO = 18,           // 0b00100_10
+  MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO = 19,        // 0b00100_11
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED = 32,       // 0b01000_00
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU = 33,    // 0b01000_01
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO = 34,    // 0b01000_10
+  MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO = 35, // 0b01000_11
+  MLIR_SPARSE_TENSOR_LEVEL_TWO_OUT_OF_FOUR = 64,        // 0b10000_00
 };
 
 //===----------------------------------------------------------------------===//
@@ -53,7 +53,7 @@ mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr);
 /// Creates a `sparse_tensor.encoding` attribute with the given parameters.
 MLIR_CAPI_EXPORTED MlirAttribute mlirSparseTensorEncodingAttrGet(
     MlirContext ctx, intptr_t lvlRank,
-    enum MlirSparseTensorDimLevelType const *lvlTypes, MlirAffineMap dimToLvl,
+    enum MlirSparseTensorLevelType const *lvlTypes, MlirAffineMap dimToLvl,
     MlirAffineMap lvlTodim, int posWidth, int crdWidth);
 
 /// Returns the level-rank of the `sparse_tensor.encoding` attribute.
@@ -61,7 +61,7 @@ MLIR_CAPI_EXPORTED intptr_t
 mlirSparseTensorEncodingGetLvlRank(MlirAttribute attr);
 
 /// Returns a specified level-type of the `sparse_tensor.encoding` attribute.
-MLIR_CAPI_EXPORTED enum MlirSparseTensorDimLevelType
+MLIR_CAPI_EXPORTED enum MlirSparseTensorLevelType
 mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl);
 
 /// Returns the dimension-to-level mapping of the `sparse_tensor.encoding`
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index 5f9c271b398dedb..9af42f00f91ed4e 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -10,7 +10,7 @@
 // IR, and the lightweight runtime support library for sparse tensor
 // manipulations.  That is, all the enums are used to define the API
 // of the runtime library and hence are also needed when generating
-// calls into the runtime library.  Moveover, the `DimLevelType` enum
+// calls into the runtime library.  Moveover, the `LevelType` enum
 // is also used as the internal IR encoding of dimension level types,
 // to avoid code duplication (e.g., for the predicates).
 //
@@ -162,10 +162,10 @@ enum class Action : uint32_t {
 /// about the particular binary encoding.
 ///
 /// The `Undef` "format" is a special value used internally for cases
-/// where we need to store an undefined or indeterminate `DimLevelType`.
+/// where we need to store an undefined or indeterminate `LevelType`.
 /// It should not be used externally, since it does not indicate an
 /// actual/representable format.
-enum class DimLevelType : uint8_t {
+enum class LevelType : uint8_t {
   Undef = 0,                // 0b00000_00
   Dense = 4,                // 0b00001_00
   Compressed = 8,           // 0b00010_00
@@ -199,44 +199,44 @@ enum class LevelPropertyNondefault : uint8_t {
 };
 
 /// Returns string representation of the given dimension level type.
-constexpr const char *toMLIRString(DimLevelType lt) {
+constexpr const char *toMLIRString(LevelType lt) {
   switch (lt) {
-  case DimLevelType::Undef:
+  case LevelType::Undef:
     return "undef";
-  case DimLevelType::Dense:
+  case LevelType::Dense:
     return "dense";
-  case DimLevelType::Compressed:
+  case LevelType::Compressed:
     return "compressed";
-  case DimLevelType::CompressedNu:
+  case LevelType::CompressedNu:
     return "compressed(nonunique)";
-  case DimLevelType::CompressedNo:
+  case LevelType::CompressedNo:
     return "compressed(nonordered)";
-  case DimLevelType::CompressedNuNo:
+  case LevelType::CompressedNuNo:
     return "compressed(nonunique, nonordered)";
-  case DimLevelType::Singleton:
+  case LevelType::Singleton:
     return "singleton";
-  case DimLevelType::SingletonNu:
+  case LevelType::SingletonNu:
     return "singleton(nonunique)";
-  case DimLevelType::SingletonNo:
+  case LevelType::SingletonNo:
     return "singleton(nonordered)";
-  case DimLevelType::SingletonNuNo:
+  case LevelType::SingletonNuNo:
     return "singleton(nonunique, nonordered)";
-  case DimLevelType::LooseCompressed:
+  case LevelType::LooseCompressed:
     return "loose_compressed";
-  case DimLevelType::LooseCompressedNu:
+  case LevelType::LooseCompressedNu:
     return "loose_compressed(nonunique)";
-  case DimLevelType::LooseCompressedNo:
+  case LevelType::LooseCompressedNo:
     return "loose_compressed(nonordered)";
-  case DimLevelType::LooseCompressedNuNo:
+  case LevelType::LooseCompressedNuNo:
     return "loose_compressed(nonunique, nonordered)";
-  case DimLevelType::TwoOutOfFour:
+  case LevelType::TwoOutOfFour:
     return "block2_4";
   }
   return "";
 }
 
-/// Check that the `DimLevelType` contains a valid (possibly undefined) value.
-constexpr bool isValidLT(DimLevelType lt) {
+/// Check that the `LevelType` contains a valid (possibly undefined) value.
+constexpr bool isValidLT(LevelType lt) {
   const uint8_t formatBits = static_cast<uint8_t>(lt) >> 2;
   const uint8_t propertyBits = static_cast<uint8_t>(lt) & 3;
   // If undefined or dense, then must be unique and ordered.
@@ -246,75 +246,75 @@ constexpr bool isValidLT(DimLevelType lt) {
              : (formatBits == 2 || formatBits == 4 || formatBits == 8);
 }
 
-/// Check if the `DimLevelType` is the special undefined value.
-constexpr bool isUndefLT(DimLevelType lt) { return lt == DimLevelType::Undef; }
+/// Check if the `LevelType` is the special undefined value.
+constexpr bool isUndefLT(LevelType lt) { return lt == LevelType::Undef; }
 
-/// Check if the `DimLevelType` is dense (regardless of properties).
-constexpr bool isDenseLT(DimLevelType lt) {
+/// Check if the `LevelType` is dense (regardless of properties).
+constexpr bool isDenseLT(LevelType lt) {
   return (static_cast<uint8_t>(lt) & ~3) ==
-         static_cast<uint8_t>(DimLevelType::Dense);
+         static_cast<uint8_t>(LevelType::Dense);
 }
 
-/// Check if the `DimLevelType` is compressed (regardless of properties).
-constexpr bool isCompressedLT(DimLevelType lt) {
+/// Check if the `LevelType` is compressed (regardless of properties).
+constexpr bool isCompressedLT(LevelType lt) {
   return (static_cast<uint8_t>(lt) & ~3) ==
-         static_cast<uint8_t>(DimLevelType::Compressed);
+         static_cast<uint8_t>(LevelType::Compressed);
 }
 
-/// Check if the `DimLevelType` is singleton (regardless of properties).
-constexpr bool isSingletonLT(DimLevelType lt) {
+/// Check if the `LevelType` is singleton (regardless of properties).
+constexpr bool isSingletonLT(LevelType lt) {
   return (static_cast<uint8_t>(lt) & ~3) ==
-         static_cast<uint8_t>(DimLevelType::Singleton);
+         static_cast<uint8_t>(LevelType::Singleton);
 }
 
-/// Check if the `DimLevelType` is loose compressed (regardless of properties).
-constexpr bool isLooseCompressedLT(DimLevelType lt) {
+/// Check if the `LevelType` is loose compressed (regardless of properties).
+constexpr bool isLooseCompressedLT(LevelType lt) {
   return (static_cast<uint8_t>(lt) & ~3) ==
-         static_cast<uint8_t>(DimLevelType::LooseCompressed);
+         static_cast<uint8_t>(LevelType::LooseCompressed);
 }
 
-/// Check if the `DimLevelType` is 2OutOf4 (regardless of properties).
-constexpr bool is2OutOf4LT(DimLevelType lt) {
+/// Check if the `LevelType` is 2OutOf4 (regardless of properties).
+constexpr bool is2OutOf4LT(LevelType lt) {
   return (static_cast<uint8_t>(lt) & ~3) ==
-         static_cast<uint8_t>(DimLevelType::TwoOutOfFour);
+         static_cast<uint8_t>(LevelType::TwoOutOfFour);
 }
 
-/// Check if the `DimLevelType` needs positions array.
-constexpr bool isWithPosLT(DimLevelType lt) {
+/// Check if the `LevelType` needs positions array.
+constexpr bool isWithPosLT(LevelType lt) {
   return isCompressedLT(lt) || isLooseCompressedLT(lt);
 }
 
-/// Check if the `DimLevelType` needs coordinates array.
-constexpr bool isWithCrdLT(DimLevelType lt) {
+/// Check if the `LevelType` needs coordinates array.
+constexpr bool isWithCrdLT(LevelType lt) {
   return isCompressedLT(lt) || isSingletonLT(lt) || isLooseCompressedLT(lt) ||
          is2OutOf4LT(lt);
 }
 
-/// Check if the `DimLevelType` is ordered (regardless of storage format).
-constexpr bool isOrderedLT(DimLevelType lt) {
+/// Check if the `LevelType` is ordered (regardless of storage format).
+constexpr bool isOrderedLT(LevelType lt) {
   return !(static_cast<uint8_t>(lt) & 2);
 }
 
-/// Check if the `DimLevelType` is unique (regardless of storage format).
-constexpr bool isUniqueLT(DimLevelType lt) {
+/// Check if the `LevelType` is unique (regardless of storage format).
+constexpr bool isUniqueLT(LevelType lt) {
   return !(static_cast<uint8_t>(lt) & 1);
 }
 
-/// Convert a DimLevelType to its corresponding LevelFormat.
+/// Convert a LevelType to its corresponding LevelFormat.
 /// Returns std::nullopt when input lt is Undef.
-constexpr std::optional<LevelFormat> getLevelFormat(DimLevelType lt) {
-  if (lt == DimLevelType::Undef)
+constexpr std::optional<LevelFormat> getLevelFormat(LevelType lt) {
+  if (lt == LevelType::Undef)
     return std::nullopt;
   return static_cast<LevelFormat>(static_cast<uint8_t>(lt) & ~3);
 }
 
-/// Convert a LevelFormat to its corresponding DimLevelType with the given
+/// Convert a LevelFormat to its corresponding LevelType with the given
 /// properties. Returns std::nullopt when the properties are not applicable
 /// for the input level format.
-constexpr std::optional<DimLevelType>
-buildLevelType(LevelFormat lf, bool ordered, bool unique) {
-  auto lt = static_cast<DimLevelType>(static_cast<uint8_t>(lf) |
-                                      (ordered ? 0 : 2) | (unique ? 0 : 1));
+constexpr std::optional<LevelType> buildLevelType(LevelFormat lf, bool ordered,
+                                                  bool unique) {
+  auto lt = static_cast<LevelType>(static_cast<uint8_t>(lf) |
+                                   (ordered ? 0 : 2) | (unique ? 0 : 1));
   return isValidLT(lt) ? std::optional(lt) : std::nullopt;
 }
 
@@ -323,190 +323,187 @@ buildLevelType(LevelFormat lf, bool ordered, bool unique) {
 //
 
 static_assert(
-    (getLevelFormat(DimLevelType::Undef) == std::nullopt &&
-     *getLevelFormat(DimLevelType::Dense) == LevelFormat::Dense &&
-     *getLevelFormat(DimLevelType::Compressed) == LevelFormat::Compressed &&
-     *getLevelFormat(DimLevelType::CompressedNu) == LevelFormat::Compressed &&
-     *getLevelFormat(DimLevelType::CompressedNo) == LevelFormat::Compressed &&
-     *getLevelFormat(DimLevelType::CompressedNuNo) == LevelFormat::Compressed &&
-     *getLevelFormat(DimLevelType::Singleton) == LevelFormat::Singleton &&
-     *getLevelFormat(DimLevelType::SingletonNu) == LevelFormat::Singleton &&
-     *getLevelFormat(DimLevelType::SingletonNo) == LevelFormat::Singleton &&
-     *getLevelFormat(DimLevelType::SingletonNuNo) == LevelFormat::Singleton &&
-     *getLevelFormat(DimLevelType::LooseCompressed) ==
+    (getLevelFormat(LevelType::Undef) == std::nullopt &&
+     *getLevelFormat(LevelType::Dense) == LevelFormat::Dense &&
+     *getLevelFormat(LevelType::Compressed) == LevelFormat::Compressed &&
+     *getLevelFormat(LevelType::CompressedNu) == LevelFormat::Compressed &&
+     *getLevelFormat(LevelType::CompressedNo) == LevelFormat::Compressed &&
+     *getLevelFormat(LevelType::CompressedNuNo) == LevelFormat::Compressed &&
+     *getLevelFormat(LevelType::Singleton) == LevelFormat::Singleton &&
+     *getLevelFormat(LevelType::SingletonNu) == LevelFormat::Singleton &&
+     *getLevelFormat(LevelType::SingletonNo) == LevelFormat::Singleton &&
+     *getLevelFormat(LevelType::SingletonNuNo) == LevelFormat::Singleton &&
+     *getLevelFormat(LevelType::LooseCompressed) ==
          LevelFormat::LooseCompressed &&
-     *getLevelFormat(DimLevelType::LooseCompressedNu) ==
+     *getLevelFormat(LevelType::LooseCompressedNu) ==
          LevelFormat::LooseCompressed &&
-     *getLevelFormat(DimLevelType::LooseCompressedNo) ==
+     *getLevelFormat(LevelType::LooseCompressedNo) ==
          LevelFormat::LooseCompressed &&
-     *getLevelFormat(DimLevelType::LooseCompressedNuNo) ==
+     *getLevelFormat(LevelType::LooseCompressedNuNo) ==
          LevelFormat::LooseCompressed &&
-     *getLevelFormat(DimLevelType::TwoOutOfFour) == LevelFormat::TwoOutOfFour),
+     *getLevelFormat(LevelType::TwoOutOfFour) == LevelFormat::TwoOutOfFour),
     "getLevelFormat conversion is broken");
 
 static_assert(
     (buildLevelType(LevelFormat::Dense, false, true) == std::nullopt &&
      buildLevelType(LevelFormat::Dense, true, false) == std::nullopt &&
      buildLevelType(LevelFormat::Dense, false, false) == std::nullopt &&
-     *buildLevelType(LevelFormat::Dense, true, true) == DimLevelType::Dense &&
+     *buildLevelType(LevelFormat::Dense, true, true) == LevelType::Dense &&
      *buildLevelType(LevelFormat::Compressed, true, true) ==
-         DimLevelType::Compressed &&
+         LevelType::Compressed &&
      *buildLevelType(LevelFormat::Compressed, true, false) ==
-         DimLevelType::CompressedNu &&
+         LevelType::CompressedNu &&
      *buildLevelType(LevelFormat::Compressed, false, true) ==
-         DimLevelType::CompressedNo &&
+         LevelType::CompressedNo &&
      *buildLevelType(LevelFormat::Compressed, false, false) ==
-         DimLevelType::CompressedNuNo &&
+         LevelType::CompressedNuNo &&
      *buildLevelType(LevelFormat::Singleton, true, true) ==
-         DimLevelType::Singleton &&
+         LevelType::Singleton &&
      *buildLevelType(LevelFormat::Singleton, true, false) ==
-         DimLevelType::SingletonNu &&
+         LevelType::SingletonNu &&
      *buildLevelType(LevelFormat::Singleton, false, true) ==
-         DimLevelType::SingletonNo &&
+         LevelType::SingletonNo &&
      *buildLevelType(LevelFormat::Singleton, false, false) ==
-         DimLevelType::SingletonNuNo &&
+         LevelType::SingletonNuNo &&
      *buildLevelType(LevelFormat::LooseCompressed, true, true) ==
-         DimLevelType::LooseCompressed &&
+         LevelType::LooseCompressed &&
      *buildLevelType(LevelFormat::LooseCompressed, true, false) ==
-         DimLevelType::LooseCompressedNu &&
+         LevelType::LooseCompressedNu &&
      *buildLevelType(LevelFormat::LooseCompressed, false, true) ==
-         DimLevelType::LooseCompressedNo &&
+         LevelType::LooseCompressedNo &&
      *buildLevelType(LevelFormat::LooseCompressed, false, false) ==
-         DimLevelType::LooseCompressedNuNo &&
+         LevelType::LooseCompressedNuNo &&
      buildLevelType(LevelFormat::TwoOutOfFour, false, true) == std::nullopt &&
      buildLevelType(LevelFormat::TwoOutOfFour, true, false) == std::nullopt &&
      buildLevelType(LevelFormat::TwoOutOfFour, false, false) == std::nullopt &&
      *buildLevelType(LevelFormat::TwoOutOfFour, true, true) ==
-         DimLevelType::TwoOutOfFour),
+         LevelType::TwoOutOfFour),
     "buildLevelType conversion is broken");
 
-static_assert((isValidLT(DimLevelType::Undef) &&
-               isValidLT(DimLevelType::Dense) &&
-               isValidLT(DimLevelType::Compressed) &&
-               isValidLT(DimLevelType::CompressedNu) &&
-               isValidLT(DimLevelType::CompressedNo) &&
-               isValidLT(DimLevelType::CompressedNuNo) &&
-               isValidLT(DimLevelType::Singleton) &&
-               isValidLT(DimLevelType::SingletonNu) &&
-               isValidLT(DimLevelType::SingletonNo) &&
-               isValidLT(DimLevelType::SingletonNuNo) &&
-               isValidLT(DimLevelType::LooseCompressed) &&
-               isValidLT(DimLevelType::LooseCompressedNu) &&
-               isValidLT(DimLevelType::LooseCompressedNo) &&
-               isValidLT(DimLevelType::LooseCompressedNuNo) &&
-               isValidLT(DimLevelType::TwoOutOfFour)),
-              "isValidLT definition is broken");
-
-static_assert((isDenseLT(DimLevelType::Dense) &&
-               !isDenseLT(DimLevelType::Compressed) &&
-               !isDenseLT(DimLevelType::CompressedNu) &&
-               !isDenseLT(DimLevelType::CompressedNo) &&
-               !isDenseLT(DimLevelType::CompressedNuNo) &&
-               !isDenseLT(DimLevelType::Singleton) &&
-               !isDenseLT(DimLevelType::SingletonNu) &&
-               !isDenseLT(DimLevelType::SingletonNo) &&
-               !isDenseLT(DimLevelType::SingletonNuNo) &&
-               !isDenseLT(DimLevelType::LooseCompressed) &&
-               !isDenseLT(DimLevelType::LooseCompressedNu) &&
-               !isDenseLT(DimLevelType::LooseCompressedNo) &&
-               !isDenseLT(DimLevelType::LooseCompressedNuNo) &&
-               !isDenseLT(DimLevelType::TwoOutOfFour)),
+static_assert(
+    (isValidLT(LevelType::Undef) && isValidLT(LevelType::Dense) &&
+     isValidLT(LevelType::Compressed) && isValidLT(LevelType::CompressedNu) &&
+     isValidLT(LevelType::CompressedNo) &&
+     isValidLT(LevelType::CompressedNuNo) && isValidLT(LevelType::Singleton) &&
+     isValidLT(LevelType::SingletonNu) && isValidLT(LevelType::SingletonNo) &&
+     isValidLT(LevelType::SingletonNuNo) &&
+     isValidLT(LevelType::LooseCompressed) &&
+     isValidLT(LevelType::LooseCompressedNu) &&
+     isValidLT(LevelType::LooseCompressedNo) &&
+     isValidLT(LevelType::LooseCompressedNuNo) &&
+     isValidLT(LevelType::TwoOutOfFour)),
+    "isValidLT definition is broken");
+
+static_assert((isDenseLT(LevelType::Dense) &&
+               !isDenseLT(LevelType::Compressed) &&
+               !isDenseLT(LevelType::CompressedNu) &&
+               !isDenseLT(LevelType::CompressedNo) &&
+               !isDenseLT(LevelType::CompressedNuNo) &&
+               !isDenseLT(LevelType::Singleton) &&
+               !isDenseLT(LevelType::SingletonNu) &&
+               !isDenseLT(LevelType::SingletonNo) &&
+               !isDenseLT(LevelType::SingletonNuNo) &&
+               !isDenseLT(LevelType::LooseCompressed) &&
+               !isDenseLT(LevelType::LooseCompressedNu) &&
+               !isDenseLT(LevelType::LooseCompressedNo) &&
+               !isDenseLT(LevelType::LooseCompressedNuNo) &&
+               !isDenseLT(LevelType::TwoOutOfFour)),
               "isDenseLT definition is broken");
 
-static_assert((!isCompressedLT(DimLevelType::Dense) &&
-               isCompressedLT(DimLevelType::Compressed) &&
-               isCompressedLT(DimLevelType::CompressedNu) &&
-               isCompressedLT(DimLevelType::CompressedNo) &&
-               isCompressedLT(DimLevelType::CompressedNuNo) &&
-               !isCompressedLT(DimLevelType::Singleton) &&
-               !isCompressedLT(DimLevelType::SingletonNu) &&
-               !isCompressedLT(DimLevelType::SingletonNo) &&
-               !isCompressedLT(DimLevelType::SingletonNuNo) &&
-               !isCompressedLT(DimLevelType::LooseCompressed) &&
-               !isCompressedLT(DimLevelType::LooseCompressedNu) &&
-               !isCompressedLT(DimLevelType::LooseCompressedNo) &&
-               !isCompressedLT(DimLevelType::LooseCompressedNuNo) &&
-               !isCompressedLT(DimLevelType::TwoOutOfFour)),
+static_assert((!isCompressedLT(LevelType::Dense) &&
+               isCompressedLT(LevelType::Compressed) &&
+               isCompressedLT(LevelType::CompressedNu) &&
+               isCompressedLT(LevelType::CompressedNo) &&
+               isCompressedLT(LevelType::CompressedNuNo) &&
+               !isCompressedLT(LevelType::Singleton) &&
+               !isCompressedLT(LevelType::SingletonNu) &&
+               !isCompressedLT(LevelType::SingletonNo) &&
+               !isCompressedLT(LevelType::SingletonNuNo) &&
+               !isCompressedLT(LevelType::LooseCompressed) &&
+               !isCompressedLT(LevelType::LooseCompressedNu) &&
+               !isCompressedLT(LevelType::LooseCompressedNo) &&
+               !isCompressedLT(LevelType::LooseCompressedNuNo) &&
+               !isCompressedLT(LevelType::TwoOutOfFour)),
               "isCompressedLT definition is broken");
 
-static_assert((!isSingletonLT(DimLevelType::Dense) &&
-               !isSingletonLT(DimLevelType::Compressed) &&
-               !isSingletonLT(DimLevelType::CompressedNu) &&
-               !isSingletonLT(DimLevelType::CompressedNo) &&
-               !isSingletonLT(DimLevelType::CompressedNuNo) &&
-               isSingletonLT(DimLevelType::Singleton) &&
-               isSingletonLT(DimLevelType::SingletonNu) &&
-               isSingletonLT(DimLevelType::SingletonNo) &&
-               isSingletonLT(DimLevelType::SingletonNuNo) &&
-               !isSingletonLT(DimLevelType::LooseCompressed) &&
-               !isSingletonLT(DimLevelType::LooseCompressedNu) &&
-               !isSingletonLT(DimLevelType::LooseCompressedNo) &&
-               !isSingletonLT(DimLevelType::LooseCompressedNuNo) &&
-               !isSingletonLT(DimLevelType::TwoOutOfFour)),
+static_assert((!isSingletonLT(LevelType::Dense) &&
+               !isSingletonLT(LevelType::Compressed) &&
+               !isSingletonLT(LevelType::CompressedNu) &&
+               !isSingletonLT(LevelType::CompressedNo) &&
+               !isSingletonLT(LevelType::CompressedNuNo) &&
+               isSingletonLT(LevelType::Singleton) &&
+               isSingletonLT(LevelType::SingletonNu) &&
+               isSingletonLT(LevelType::SingletonNo) &&
+               isSingletonLT(LevelType::SingletonNuNo) &&
+               !isSingletonLT(LevelType::LooseCompressed) &&
+               !isSingletonLT(LevelType::LooseCompressedNu) &&
+               !isSingletonLT(LevelType::LooseCompressedNo) &&
+               !isSingletonLT(LevelType::LooseCompressedNuNo) &&
+               !isSingletonLT(LevelType::TwoOutOfFour)),
               "isSingletonLT definition is broken");
 
-static_assert((!isLooseCompressedLT(DimLevelType::Dense) &&
-               !isLooseCompressedLT(DimLevelType::Compressed) &&
-               !isLooseCompressedLT(DimLevelType::CompressedNu) &&
-               !isLooseCompressedLT(DimLevelType::CompressedNo) &&
-               !isLooseCompressedLT(DimLevelType::CompressedNuNo) &&
-               !isLooseCompressedLT(DimLevelType::Singleton) &&
-               !isLooseCompressedLT(DimLevelType::SingletonNu) &&
-               !isLooseCompressedLT(DimLevelType::SingletonNo) &&
-               !isLooseCompressedLT(DimLevelType::SingletonNuNo) &&
-               isLooseCompressedLT(DimLevelType::LooseCompressed) &&
-               isLooseCompressedLT(DimLevelType::LooseCompressedNu) &&
-               isLooseCompressedLT(DimLevelType::LooseCompressedNo) &&
-               isLooseCompressedLT(DimLevelType::LooseCompressedNuNo) &&
-               !isLooseCompressedLT(DimLevelType::TwoOutOfFour)),
+static_assert((!isLooseCompressedLT(LevelType::Dense) &&
+               !isLooseCompressedLT(LevelType::Compressed) &&
+               !isLooseCompressedLT(LevelType::CompressedNu) &&
+               !isLooseCompressedLT(LevelType::CompressedNo) &&
+               !isLooseCompressedLT(LevelType::CompressedNuNo) &&
+               !isLooseCompressedLT(LevelType::Singleton) &&
+               !isLooseCompressedLT(LevelType::SingletonNu) &&
+               !isLooseCompressedLT(LevelType::SingletonNo) &&
+               !isLooseCompressedLT(LevelType::SingletonNuNo) &&
+               isLooseCompressedLT(LevelType::LooseCompressed) &&
+               isLooseCompressedLT(LevelType::LooseCompressedNu) &&
+               isLooseCompressedLT(LevelType::LooseCompressedNo) &&
+               isLooseCompressedLT(LevelType::LooseCompressedNuNo) &&
+               !isLooseCompressedLT(LevelType::TwoOutOfFour)),
               "isLooseCompressedLT definition is broken");
 
-static_assert((!is2OutOf4LT(DimLevelType::Dense) &&
-               !is2OutOf4LT(DimLevelType::Compressed) &&
-               !is2OutOf4LT(DimLevelType::CompressedNu) &&
-               !is2OutOf4LT(DimLevelType::CompressedNo) &&
-               !is2OutOf4LT(DimLevelType::CompressedNuNo) &&
-               !is2OutOf4LT(DimLevelType::Singleton) &&
-               !is2OutOf4LT(DimLevelType::SingletonNu) &&
-               !is2OutOf4LT(DimLevelType::SingletonNo) &&
-               !is2OutOf4LT(DimLevelType::SingletonNuNo) &&
-               !is2OutOf4LT(DimLevelType::LooseCompressed) &&
-               !is2OutOf4LT(DimLevelType::LooseCompressedNu) &&
-               !is2OutOf4LT(DimLevelType::LooseCompressedNo) &&
-               !is2OutOf4LT(DimLevelType::LooseCompressedNuNo) &&
-               is2OutOf4LT(DimLevelType::TwoOutOfFour)),
+static_assert((!is2OutOf4LT(LevelType::Dense) &&
+               !is2OutOf4LT(LevelType::Compressed) &&
+               !is2OutOf4LT(LevelType::CompressedNu) &&
+               !is2OutOf4LT(LevelType::CompressedNo) &&
+               !is2OutOf4LT(LevelType::CompressedNuNo) &&
+               !is2OutOf4LT(LevelType::Singleton) &&
+               !is2OutOf4LT(LevelType::SingletonNu) &&
+               !is2OutOf4LT(LevelType::SingletonNo) &&
+               !is2OutOf4LT(LevelType::SingletonNuNo) &&
+               !is2OutOf4LT(LevelType::LooseCompressed) &&
+               !is2OutOf4LT(LevelType::LooseCompressedNu) &&
+               !is2OutOf4LT(LevelType::LooseCompressedNo) &&
+               !is2OutOf4LT(LevelType::LooseCompressedNuNo) &&
+               is2OutOf4LT(LevelType::TwoOutOfFour)),
               "is2OutOf4LT definition is broken");
 
-static_assert((isOrderedLT(DimLevelType::Dense) &&
-               isOrderedLT(DimLevelType::Compressed) &&
-               isOrderedLT(DimLevelType::CompressedNu) &&
-               !isOrderedLT(DimLevelType::CompressedNo) &&
-               !isOrderedLT(DimLevelType::CompressedNuNo) &&
-               isOrderedLT(DimLevelType::Singleton) &&
-               isOrderedLT(DimLevelType::SingletonNu) &&
-               !isOrderedLT(DimLevelType::SingletonNo) &&
-               !isOrderedLT(DimLevelType::SingletonNuNo) &&
-               isOrderedLT(DimLevelType::LooseCompressed) &&
-               isOrderedLT(DimLevelType::LooseCompressedNu) &&
-               !isOrderedLT(DimLevelType::LooseCompressedNo) &&
-               !isOrderedLT(DimLevelType::LooseCompressedNuNo) &&
-               isOrderedLT(DimLevelType::TwoOutOfFour)),
+static_assert((isOrderedLT(LevelType::Dense) &&
+               isOrderedLT(LevelType::Compressed) &&
+               isOrderedLT(LevelType::CompressedNu) &&
+               !isOrderedLT(LevelType::CompressedNo) &&
+               !isOrderedLT(LevelType::CompressedNuNo) &&
+               isOrderedLT(LevelType::Singleton) &&
+               isOrderedLT(LevelType::SingletonNu) &&
+               !isOrderedLT(LevelType::SingletonNo) &&
+               !isOrderedLT(LevelType::SingletonNuNo) &&
+               isOrderedLT(LevelType::LooseCompressed) &&
+               isOrderedLT(LevelType::LooseCompressedNu) &&
+               !isOrderedLT(LevelType::LooseCompressedNo) &&
+               !isOrderedLT(LevelType::LooseCompressedNuNo) &&
+               isOrderedLT(LevelType::TwoOutOfFour)),
               "isOrderedLT definition is broken");
 
-static_assert((isUniqueLT(DimLevelType::Dense) &&
-               isUniqueLT(DimLevelType::Compressed) &&
-               !isUniqueLT(DimLevelType::CompressedNu) &&
-               isUniqueLT(DimLevelType::CompressedNo) &&
-               !isUniqueLT(DimLevelType::CompressedNuNo) &&
-               isUniqueLT(DimLevelType::Singleton) &&
-               !isUniqueLT(DimLevelType::SingletonNu) &&
-               isUniqueLT(DimLevelType::SingletonNo) &&
-               !isUniqueLT(DimLevelType::SingletonNuNo) &&
-               isUniqueLT(DimLevelType::LooseCompressed) &&
-               !isUniqueLT(DimLevelType::LooseCompressedNu) &&
-               isUniqueLT(DimLevelType::LooseCompressedNo) &&
-               !isUniqueLT(DimLevelType::LooseCompressedNuNo) &&
-               isUniqueLT(DimLevelType::TwoOutOfFour)),
+static_assert((isUniqueLT(LevelType::Dense) &&
+               isUniqueLT(LevelType::Compressed) &&
+               !isUniqueLT(LevelType::CompressedNu) &&
+               isUniqueLT(LevelType::CompressedNo) &&
+               !isUniqueLT(LevelType::CompressedNuNo) &&
+               isUniqueLT(LevelType::Singleton) &&
+               !isUniqueLT(LevelType::SingletonNu) &&
+               isUniqueLT(LevelType::SingletonNo) &&
+               !isUniqueLT(LevelType::SingletonNuNo) &&
+               isUniqueLT(LevelType::LooseCompressed) &&
+               !isUniqueLT(LevelType::LooseCompressedNu) &&
+               isUniqueLT(LevelType::LooseCompressedNo) &&
+               !isUniqueLT(LevelType::LooseCompressedNuNo) &&
+               isUniqueLT(LevelType::TwoOutOfFour)),
               "isUniqueLT definition is broken");
 
 /// Bit manipulations for affine encoding.
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index 0b12bce8996a98b..7fcd1bc2a384a58 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -278,7 +278,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     // A level-type for each level of the sparse storage
     // (consists of a level-format combined with level-properties).
     ArrayRefParameter<
-      "::mlir::sparse_tensor::DimLevelType",
+      "::mlir::sparse_tensor::LevelType",
       "level-types"
       >: $lvlTypes,
 
@@ -302,7 +302,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
   );
 
   let builders = [
-    AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::DimLevelType>":$lvlTypes,
+    AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::LevelType>":$lvlTypes,
                      CArg<"AffineMap", "{}">:$dimToLvl,
                      CArg<"AffineMap", "{}">:$lvlToDim,
                      CArg<"unsigned", "0">:$posWidth,
@@ -366,9 +366,9 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     //
 
     /// Safely looks up the level-type for the requested level.  (Returns
-    /// `DimLevelType::Dense` for the null encoding, since dense-tensors
+    /// `LevelType::Dense` for the null encoding, since dense-tensors
     /// are always all-dense.)
-    ::mlir::sparse_tensor::DimLevelType getLvlType(::mlir::sparse_tensor::Level l) const;
+    ::mlir::sparse_tensor::LevelType getLvlType(::mlir::sparse_tensor::Level l) const;
 
     bool isDenseLvl(::mlir::sparse_tensor::Level l) const { return isDenseLT(getLvlType(l)); }
     bool isCompressedLvl(::mlir::sparse_tensor::Level l) const { return isCompressedLT(getLvlType(l)); }
@@ -428,7 +428,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
 
     void printSymbols(AffineMap &map, AsmPrinter &printer) const;
     void printDimensions(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr> dimSlices) const;
-    void printLevels(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::DimLevelType> lvlTypes) const;
+    void printLevels(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::LevelType> lvlTypes) const;
   }];
 
   let genVerifyDecl = 1;
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h
index a8b1f4fb5f5e105..27dc39609cdadd6 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h
@@ -126,7 +126,7 @@ class StorageLayout {
   void foreachField(
       llvm::function_ref<bool(
           FieldIndex /*fieldIdx*/, SparseTensorFieldKind /*fieldKind*/,
-          Level /*lvl (if applicable)*/, DimLevelType /*LT (if applicable)*/)>)
+          Level /*lvl (if applicable)*/, LevelType /*LT (if applicable)*/)>)
       const;
 
   /// Gets the field index for required field.
@@ -165,7 +165,7 @@ inline unsigned getNumDataFieldsFromEncoding(SparseTensorEncodingAttr enc) {
 inline void foreachFieldInSparseTensor(
     SparseTensorEncodingAttr enc,
     llvm::function_ref<bool(FieldIndex, SparseTensorFieldKind, Level,
-                            DimLevelType)>
+                            LevelType)>
         callback) {
   return StorageLayout(enc).foreachField(callback);
 }
@@ -173,7 +173,7 @@ inline void foreachFieldInSparseTensor(
 void foreachFieldAndTypeInSparseTensor(
     SparseTensorType,
     llvm::function_ref<bool(Type, FieldIndex, SparseTensorFieldKind, Level,
-                            DimLevelType)>);
+                            LevelType)>);
 
 } // namespace sparse_tensor
 } // namespace mlir
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
index bc2f54745f62fc0..4eb666d76cd2d6f 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
@@ -282,8 +282,8 @@ class SparseTensorType {
   /// `ShapedType::Trait<T>::getNumDynamicDims`.
   int64_t getNumDynamicDims() const { return rtp.getNumDynamicDims(); }
 
-  ArrayRef<DimLevelType> getLvlTypes() const { return enc.getLvlTypes(); }
-  DimLevelType getLvlType(Level l) const {
+  ArrayRef<LevelType> getLvlTypes() const { return enc.getLvlTypes(); }
+  LevelType getLvlType(Level l) const {
     // This OOB check is for dense-tensors, since this class knows
     // their lvlRank (whereas STEA::getLvlType will/can only check
     // OOB for sparse-tensors).
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
index b05695e99f8dc3a..4a34bb2e003e88c 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
@@ -56,8 +56,8 @@ using LatPointId = unsigned;
 /// for the corresponding `SmallVector<LatPointId>` object.
 using LatSetId = unsigned;
 
-/// A pair of level and its corresponding DimLevelType of a tensor.
-using LvlLTPair = std::pair<Level, DimLevelType>;
+/// A pair of level and its corresponding LevelType of a tensor.
+using LvlLTPair = std::pair<Level, LevelType>;
 
 /// A pair of loop id and its coefficients. E.g., for affine expression in the
 /// affine map `2 * d0`, loop id = 0, coefficient = 2.
@@ -395,13 +395,13 @@ class Merger {
   bool hasSparseIdxReduction(const BitVector &bits) const;
 
   /// Gets the level-type of the `t`th tensor on `i`th loop.
-  DimLevelType getLvlType(TensorId t, LoopId i) const {
+  LevelType getLvlType(TensorId t, LoopId i) const {
     assert(isValidTensorId(t) && isValidLoopId(i));
     return lvlTypes[t][i];
   }
 
   /// Gets the level-type of the TensorLoopId.
-  DimLevelType getLvlType(TensorLoopId b) const {
+  LevelType getLvlType(TensorLoopId b) const {
     return getLvlType(tensor(b), loop(b));
   }
 
@@ -422,7 +422,7 @@ class Merger {
 
   /// Sets the level number and level-type of the `t`th tensor on
   /// `i`th loop.
-  void setLevelAndType(TensorId t, LoopId i, Level lvl, DimLevelType lt) {
+  void setLevelAndType(TensorId t, LoopId i, Level lvl, LevelType lt) {
     assert(isValidLevel(t, lvl) && isValidLoopId(i) && isValidLT(lt));
     lvlTypes[t][i] = lt;
     loopToLvl[t][i] = lvl;
@@ -432,7 +432,7 @@ class Merger {
   }
 
   using ForeachTensorLoopIdCallback = function_ref<void(
-      TensorLoopId, TensorId, std::optional<Level>, DimLevelType, bool)>;
+      TensorLoopId, TensorId, std::optional<Level>, LevelType, bool)>;
 
   /// Iterates over a set of `TensorLoopId`s, invoking the callback
   /// for each `TensorLoopId` and passing it the corresponding tensor
@@ -469,7 +469,7 @@ class Merger {
 
   /// Establishes the two-way map that i <-> <t, lvl, lt>.
   void setLoopDependentTensorLevel(LoopId i, TensorId t, Level lvl,
-                                   DimLevelType lt, unsigned coefficient) {
+                                   LevelType lt, unsigned coefficient) {
     assert(isValidLoopId(i) && isValidLevel(t, lvl));
     assert(!loopToUnresolvedLvls[i][t].has_value()); // must be the first def
     loopToUnresolvedLvls[i][t] = std::make_pair(lvl, lt);
@@ -520,7 +520,7 @@ class Merger {
     return loopToUnresolvedLvls[loop(b)][tensor(b)]->first;
   }
 
-  DimLevelType getLoopDependentLevelType(TensorLoopId b) const {
+  LevelType getLoopDependentLevelType(TensorLoopId b) const {
     assert(isLvlWithNonTrivialIdxExp(b));
     return loopToUnresolvedLvls[loop(b)][tensor(b)]->second;
   }
@@ -636,7 +636,7 @@ class Merger {
   // does not.
 
   /// Map that converts pair<TensorId, LoopId> to the corresponding lvl-type.
-  std::vector<std::vector<DimLevelType>> lvlTypes;
+  std::vector<std::vector<LevelType>> lvlTypes;
 
   /// Map that converts pair<TensorId, LoopId> to the corresponding lvl.
   std::vector<std::vector<std::optional<Level>>> loopToLvl;
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
index 6b4a174596ffef6..ccdc605d756433d 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
@@ -197,7 +197,7 @@ class SparseTensorReader final {
   template <typename P, typename I, typename V>
   SparseTensorStorage<P, I, V> *
   readSparseTensor(uint64_t lvlRank, const uint64_t *lvlSizes,
-                   const DimLevelType *lvlTypes, const uint64_t *dim2lvl,
+                   const LevelType *lvlTypes, const uint64_t *dim2lvl,
                    const uint64_t *lvl2dim) {
     const uint64_t dimRank = getRank();
     MapRef map(dimRank, lvlRank, dim2lvl, lvl2dim);
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index 29cc65364bcf525..19c49e6c487dff7 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -70,7 +70,7 @@ class SparseTensorStorageBase {
   /// Constructs a new sparse-tensor storage object with the given encoding.
   SparseTensorStorageBase(uint64_t dimRank, const uint64_t *dimSizes,
                           uint64_t lvlRank, const uint64_t *lvlSizes,
-                          const DimLevelType *lvlTypes, const uint64_t *dim2lvl,
+                          const LevelType *lvlTypes, const uint64_t *dim2lvl,
                           const uint64_t *lvl2dim);
   virtual ~SparseTensorStorageBase() = default;
 
@@ -99,10 +99,10 @@ class SparseTensorStorageBase {
   }
 
   /// Gets the level-types array.
-  const std::vector<DimLevelType> &getLvlTypes() const { return lvlTypes; }
+  const std::vector<LevelType> &getLvlTypes() const { return lvlTypes; }
 
   /// Safely looks up the type of the given level.
-  DimLevelType getLvlType(uint64_t l) const {
+  LevelType getLvlType(uint64_t l) const {
     assert(l < getLvlRank());
     return lvlTypes[l];
   }
@@ -180,7 +180,7 @@ class SparseTensorStorageBase {
 private:
   const std::vector<uint64_t> dimSizes;
   const std::vector<uint64_t> lvlSizes;
-  const std::vector<DimLevelType> lvlTypes;
+  const std::vector<LevelType> lvlTypes;
   const std::vector<uint64_t> dim2lvlVec;
   const std::vector<uint64_t> lvl2dimVec;
 
@@ -203,7 +203,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
   /// doesn't entail `!(positions[l].empty())`.
   SparseTensorStorage(uint64_t dimRank, const uint64_t *dimSizes,
                       uint64_t lvlRank, const uint64_t *lvlSizes,
-                      const DimLevelType *lvlTypes, const uint64_t *dim2lvl,
+                      const LevelType *lvlTypes, const uint64_t *dim2lvl,
                       const uint64_t *lvl2dim)
       : SparseTensorStorageBase(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes,
                                 dim2lvl, lvl2dim),
@@ -219,7 +219,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
   /// some other form of initialization.
   SparseTensorStorage(uint64_t dimRank, const uint64_t *dimSizes,
                       uint64_t lvlRank, const uint64_t *lvlSizes,
-                      const DimLevelType *lvlTypes, const uint64_t *dim2lvl,
+                      const LevelType *lvlTypes, const uint64_t *dim2lvl,
                       const uint64_t *lvl2dim, SparseTensorCOO<V> *lvlCOO,
                       bool initializeValuesIfAllDense);
 
@@ -228,7 +228,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
   /// overhead-storage allocation as the ctor above.
   SparseTensorStorage(uint64_t dimRank, const uint64_t *dimSizes,
                       uint64_t lvlRank, const uint64_t *lvlSizes,
-                      const DimLevelType *lvlTypes, const uint64_t *dim2lvl,
+                      const LevelType *lvlTypes, const uint64_t *dim2lvl,
                       const uint64_t *lvl2dim, SparseTensorCOO<V> &lvlCOO);
 
   /// Constructs a sparse tensor with the given encoding, and initializes
@@ -240,19 +240,19 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
   /// passed in as a single AoS memory.
   SparseTensorStorage(uint64_t dimRank, const uint64_t *dimSizes,
                       uint64_t lvlRank, const uint64_t *lvlSizes,
-                      const DimLevelType *lvlTypes, const uint64_t *dim2lvl,
+                      const LevelType *lvlTypes, const uint64_t *dim2lvl,
                       const uint64_t *lvl2dim, const intptr_t *lvlBufs);
 
   /// Allocates a new empty sparse tensor.
   static SparseTensorStorage<P, C, V> *
   newEmpty(uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
-           const uint64_t *lvlSizes, const DimLevelType *lvlTypes,
+           const uint64_t *lvlSizes, const LevelType *lvlTypes,
            const uint64_t *dim2lvl, const uint64_t *lvl2dim, bool forwarding);
 
   /// Allocates a new sparse tensor and initializes it from the given COO.
   static SparseTensorStorage<P, C, V> *
   newFromCOO(uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
-             const uint64_t *lvlSizes, const DimLevelType *lvlTypes,
+             const uint64_t *lvlSizes, const LevelType *lvlTypes,
              const uint64_t *dim2lvl, const uint64_t *lvl2dim,
              SparseTensorCOO<V> &lvlCOO);
 
@@ -261,7 +261,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
   static SparseTensorStorage<P, C, V> *
   packFromLvlBuffers(uint64_t dimRank, const uint64_t *dimSizes,
                      uint64_t lvlRank, const uint64_t *lvlSizes,
-                     const DimLevelType *lvlTypes, const uint64_t *dim2lvl,
+                     const LevelType *lvlTypes, const uint64_t *dim2lvl,
                      const uint64_t *lvl2dim, uint64_t srcRank,
                      const intptr_t *buffers);
 
@@ -294,7 +294,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
   void lexInsert(const uint64_t *lvlCoords, V val) final {
     assert(lvlCoords);
     bool allDense = std::all_of(getLvlTypes().begin(), getLvlTypes().end(),
-                                [](DimLevelType lt) { return isDenseLT(lt); });
+                                [](LevelType lt) { return isDenseLT(lt); });
     if (allDense) {
       uint64_t lvlRank = getLvlRank();
       uint64_t valIdx = 0;
@@ -654,7 +654,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
 template <typename P, typename C, typename V>
 SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::newEmpty(
     uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
-    const uint64_t *lvlSizes, const DimLevelType *lvlTypes,
+    const uint64_t *lvlSizes, const LevelType *lvlTypes,
     const uint64_t *dim2lvl, const uint64_t *lvl2dim, bool forwarding) {
   SparseTensorCOO<V> *lvlCOO = nullptr;
   if (forwarding)
@@ -667,7 +667,7 @@ SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::newEmpty(
 template <typename P, typename C, typename V>
 SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::newFromCOO(
     uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
-    const uint64_t *lvlSizes, const DimLevelType *lvlTypes,
+    const uint64_t *lvlSizes, const LevelType *lvlTypes,
     const uint64_t *dim2lvl, const uint64_t *lvl2dim,
     SparseTensorCOO<V> &lvlCOO) {
   return new SparseTensorStorage<P, C, V>(dimRank, dimSizes, lvlRank, lvlSizes,
@@ -677,7 +677,7 @@ SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::newFromCOO(
 template <typename P, typename C, typename V>
 SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::packFromLvlBuffers(
     uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
-    const uint64_t *lvlSizes, const DimLevelType *lvlTypes,
+    const uint64_t *lvlSizes, const LevelType *lvlTypes,
     const uint64_t *dim2lvl, const uint64_t *lvl2dim, uint64_t srcRank,
     const intptr_t *buffers) {
   return new SparseTensorStorage<P, C, V>(dimRank, dimSizes, lvlRank, lvlSizes,
@@ -693,7 +693,7 @@ SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::packFromLvlBuffers(
 template <typename P, typename C, typename V>
 SparseTensorStorage<P, C, V>::SparseTensorStorage(
     uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
-    const uint64_t *lvlSizes, const DimLevelType *lvlTypes,
+    const uint64_t *lvlSizes, const LevelType *lvlTypes,
     const uint64_t *dim2lvl, const uint64_t *lvl2dim,
     SparseTensorCOO<V> *lvlCOO, bool initializeValuesIfAllDense)
     : SparseTensorStorage(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes,
@@ -742,7 +742,7 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage(
 template <typename P, typename C, typename V>
 SparseTensorStorage<P, C, V>::SparseTensorStorage( // NOLINT
     uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
-    const uint64_t *lvlSizes, const DimLevelType *lvlTypes,
+    const uint64_t *lvlSizes, const LevelType *lvlTypes,
     const uint64_t *dim2lvl, const uint64_t *lvl2dim,
     SparseTensorCOO<V> &lvlCOO)
     : SparseTensorStorage(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes,
@@ -761,7 +761,7 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage( // NOLINT
 template <typename P, typename C, typename V>
 SparseTensorStorage<P, C, V>::SparseTensorStorage(
     uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
-    const uint64_t *lvlSizes, const DimLevelType *lvlTypes,
+    const uint64_t *lvlSizes, const LevelType *lvlTypes,
     const uint64_t *dim2lvl, const uint64_t *lvl2dim, const intptr_t *lvlBufs)
     : SparseTensorStorage(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes,
                           dim2lvl, lvl2dim) {
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index 0eb0590ac7e5747..8b0829aab0d8d0f 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -52,7 +52,7 @@ extern "C" {
 MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_newSparseTensor( // NOLINT
     StridedMemRefType<index_type, 1> *dimSizesRef,
     StridedMemRefType<index_type, 1> *lvlSizesRef,
-    StridedMemRefType<DimLevelType, 1> *lvlTypesRef,
+    StridedMemRefType<LevelType, 1> *lvlTypesRef,
     StridedMemRefType<index_type, 1> *dim2lvlRef,
     StridedMemRefType<index_type, 1> *lvl2dimRef, OverheadType posTp,
     OverheadType crdTp, PrimaryType valTp, Action action, void *ptr);
diff --git a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
index 5b5d0136cc2121d..8706c523988b10f 100644
--- a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
+++ b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
@@ -23,30 +23,30 @@ using namespace mlir;
 using namespace mlir::python::adaptors;
 
 static void populateDialectSparseTensorSubmodule(const py::module &m) {
-  py::enum_<MlirSparseTensorDimLevelType>(m, "DimLevelType", py::module_local())
-      .value("dense", MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE)
-      .value("compressed24", MLIR_SPARSE_TENSOR_DIM_LEVEL_TWO_OUT_OF_FOUR)
-      .value("compressed", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED)
-      .value("compressed_nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU)
-      .value("compressed_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO)
-      .value("compressed_nu_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO)
-      .value("singleton", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON)
-      .value("singleton_nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU)
-      .value("singleton_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO)
-      .value("singleton_nu_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO)
-      .value("loose_compressed", MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED)
+  py::enum_<MlirSparseTensorLevelType>(m, "LevelType", py::module_local())
+      .value("dense", MLIR_SPARSE_TENSOR_LEVEL_DENSE)
+      .value("compressed24", MLIR_SPARSE_TENSOR_LEVEL_TWO_OUT_OF_FOUR)
+      .value("compressed", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED)
+      .value("compressed_nu", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU)
+      .value("compressed_no", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO)
+      .value("compressed_nu_no", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO)
+      .value("singleton", MLIR_SPARSE_TENSOR_LEVEL_SINGLETON)
+      .value("singleton_nu", MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU)
+      .value("singleton_no", MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO)
+      .value("singleton_nu_no", MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO)
+      .value("loose_compressed", MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED)
       .value("loose_compressed_nu",
-             MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU)
+             MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU)
       .value("loose_compressed_no",
-             MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NO)
+             MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO)
       .value("loose_compressed_nu_no",
-             MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU_NO);
+             MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO);
 
   mlir_attribute_subclass(m, "EncodingAttr",
                           mlirAttributeIsASparseTensorEncodingAttr)
       .def_classmethod(
           "get",
-          [](py::object cls, std::vector<MlirSparseTensorDimLevelType> lvlTypes,
+          [](py::object cls, std::vector<MlirSparseTensorLevelType> lvlTypes,
              std::optional<MlirAffineMap> dimToLvl,
              std::optional<MlirAffineMap> lvlToDim, int posWidth, int crdWidth,
              MlirContext context) {
@@ -64,7 +64,7 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
           "lvl_types",
           [](MlirAttribute self) {
             const int lvlRank = mlirSparseTensorEncodingGetLvlRank(self);
-            std::vector<MlirSparseTensorDimLevelType> ret;
+            std::vector<MlirSparseTensorLevelType> ret;
             ret.reserve(lvlRank);
             for (int l = 0; l < lvlRank; ++l)
               ret.push_back(mlirSparseTensorEncodingAttrGetLvlType(self, l));
diff --git a/mlir/lib/CAPI/Dialect/SparseTensor.cpp b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
index c3ad95527df489f..e4534ad132385f9 100644
--- a/mlir/lib/CAPI/Dialect/SparseTensor.cpp
+++ b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
@@ -20,26 +20,25 @@ MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(SparseTensor, sparse_tensor,
                                       mlir::sparse_tensor::SparseTensorDialect)
 
 // Ensure the C-API enums are int-castable to C++ equivalents.
-static_assert(
-    static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE) ==
-            static_cast<int>(DimLevelType::Dense) &&
-        static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED) ==
-            static_cast<int>(DimLevelType::Compressed) &&
-        static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU) ==
-            static_cast<int>(DimLevelType::CompressedNu) &&
-        static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO) ==
-            static_cast<int>(DimLevelType::CompressedNo) &&
-        static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO) ==
-            static_cast<int>(DimLevelType::CompressedNuNo) &&
-        static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON) ==
-            static_cast<int>(DimLevelType::Singleton) &&
-        static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU) ==
-            static_cast<int>(DimLevelType::SingletonNu) &&
-        static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO) ==
-            static_cast<int>(DimLevelType::SingletonNo) &&
-        static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO) ==
-            static_cast<int>(DimLevelType::SingletonNuNo),
-    "MlirSparseTensorDimLevelType (C-API) and DimLevelType (C++) mismatch");
+static_assert(static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_DENSE) ==
+                      static_cast<int>(LevelType::Dense) &&
+                  static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED) ==
+                      static_cast<int>(LevelType::Compressed) &&
+                  static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU) ==
+                      static_cast<int>(LevelType::CompressedNu) &&
+                  static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO) ==
+                      static_cast<int>(LevelType::CompressedNo) &&
+                  static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO) ==
+                      static_cast<int>(LevelType::CompressedNuNo) &&
+                  static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON) ==
+                      static_cast<int>(LevelType::Singleton) &&
+                  static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU) ==
+                      static_cast<int>(LevelType::SingletonNu) &&
+                  static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO) ==
+                      static_cast<int>(LevelType::SingletonNo) &&
+                  static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO) ==
+                      static_cast<int>(LevelType::SingletonNuNo),
+              "MlirSparseTensorLevelType (C-API) and LevelType (C++) mismatch");
 
 bool mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr) {
   return isa<SparseTensorEncodingAttr>(unwrap(attr));
@@ -47,13 +46,13 @@ bool mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr) {
 
 MlirAttribute
 mlirSparseTensorEncodingAttrGet(MlirContext ctx, intptr_t lvlRank,
-                                MlirSparseTensorDimLevelType const *lvlTypes,
+                                MlirSparseTensorLevelType const *lvlTypes,
                                 MlirAffineMap dimToLvl, MlirAffineMap lvlToDim,
                                 int posWidth, int crdWidth) {
-  SmallVector<DimLevelType> cppLvlTypes;
+  SmallVector<LevelType> cppLvlTypes;
   cppLvlTypes.reserve(lvlRank);
   for (intptr_t l = 0; l < lvlRank; ++l)
-    cppLvlTypes.push_back(static_cast<DimLevelType>(lvlTypes[l]));
+    cppLvlTypes.push_back(static_cast<LevelType>(lvlTypes[l]));
   return wrap(SparseTensorEncodingAttr::get(unwrap(ctx), cppLvlTypes,
                                             unwrap(dimToLvl), unwrap(lvlToDim),
                                             posWidth, crdWidth));
@@ -71,9 +70,9 @@ intptr_t mlirSparseTensorEncodingGetLvlRank(MlirAttribute attr) {
   return cast<SparseTensorEncodingAttr>(unwrap(attr)).getLvlRank();
 }
 
-MlirSparseTensorDimLevelType
+MlirSparseTensorLevelType
 mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl) {
-  return static_cast<MlirSparseTensorDimLevelType>(
+  return static_cast<MlirSparseTensorLevelType>(
       cast<SparseTensorEncodingAttr>(unwrap(attr)).getLvlType(lvl));
 }
 
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
index fb9d1851b740046..b207dfe0d2be2f9 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
@@ -62,7 +62,7 @@ bool DimSpec::isValid(Ranks const &ranks) const {
 // `LvlSpec` implementation.
 //===----------------------------------------------------------------------===//
 
-LvlSpec::LvlSpec(LvlVar var, LvlExpr expr, DimLevelType type)
+LvlSpec::LvlSpec(LvlVar var, LvlExpr expr, LevelType type)
     : var(var), expr(expr), type(type) {
   assert(expr);
   assert(isValidLT(type) && !isUndefLT(type));
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.h b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.h
index 8563d8f7e936ca4..266fd7a3c5837b9 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.h
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.h
@@ -202,10 +202,10 @@ class LvlSpec final {
   /// The level-expression.
   LvlExpr expr;
   /// The level-type (== level-format + lvl-properties).
-  DimLevelType type;
+  LevelType type;
 
 public:
-  LvlSpec(LvlVar var, LvlExpr expr, DimLevelType type);
+  LvlSpec(LvlVar var, LvlExpr expr, LevelType type);
 
   MLIRContext *getContext() const {
     MLIRContext *ctx = expr.tryGetContext();
@@ -217,7 +217,7 @@ class LvlSpec final {
   constexpr bool canElideVar() const { return elideVar; }
   void setElideVar(bool b) { elideVar = b; }
   constexpr LvlExpr getExpr() const { return expr; }
-  constexpr DimLevelType getType() const { return type; }
+  constexpr LevelType getType() const { return type; }
 
   /// Checks whether the variables bound/used by this spec are valid
   /// with respect to the given ranks.
@@ -246,7 +246,7 @@ class DimLvlMap final {
 
   ArrayRef<LvlSpec> getLvls() const { return lvlSpecs; }
   const LvlSpec &getLvl(Level lvl) const { return lvlSpecs[lvl]; }
-  DimLevelType getLvlType(Level lvl) const { return getLvl(lvl).getType(); }
+  LevelType getLvlType(Level lvl) const { return getLvl(lvl).getType(); }
 
   AffineMap getDimToLvlMap(MLIRContext *context) const;
   AffineMap getLvlToDimMap(MLIRContext *context) const;
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMapParser.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMapParser.cpp
index 6fb69d1397e6cfb..56b435c57d30ac0 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMapParser.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMapParser.cpp
@@ -298,7 +298,7 @@ ParseResult DimLvlMapParser::parseLvlSpec(bool requireLvlVarBinding) {
   const auto type = lvlTypeParser.parseLvlType(parser);
   FAILURE_IF_FAILED(type)
 
-  lvlSpecs.emplace_back(var, expr, static_cast<DimLevelType>(*type));
+  lvlSpecs.emplace_back(var, expr, static_cast<LevelType>(*type));
   return success();
 }
 
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
index ad2d5a651993327..eb7ea63a4e88b85 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
@@ -1,4 +1,4 @@
-//===- LvlTypeParser.h - `DimLevelType` parser ----------------------------===//
+//===- LvlTypeParser.cpp - `LevelType` parser -----------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -58,7 +58,7 @@ FailureOr<uint8_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
     return failure();
   }
 
-  ERROR_IF(!isValidLT(static_cast<DimLevelType>(properties)),
+  ERROR_IF(!isValidLT(static_cast<LevelType>(properties)),
            "invalid level type: level format doesn't support the properties");
   return properties;
 }
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.h b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.h
index 10fb6c8f1c04730..5e2f11b75d4da68 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.h
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.h
@@ -1,4 +1,4 @@
-//===- LvlTypeParser.h - `DimLevelType` parser ------------------*- C++ -*-===//
+//===- LvlTypeParser.h - `LevelType` parser ---------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 791aeebee5a328d..686bc021c2667ab 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -62,7 +62,7 @@ static constexpr FieldIndex kDataFieldStartingIdx = 0;
 
 void StorageLayout::foreachField(
     llvm::function_ref<bool(FieldIndex, SparseTensorFieldKind, Level,
-                            DimLevelType)>
+                            LevelType)>
         callback) const {
   const auto lvlTypes = enc.getLvlTypes();
   const Level lvlRank = enc.getLvlRank();
@@ -83,18 +83,18 @@ void StorageLayout::foreachField(
   }
   // The values array.
   if (!(callback(fieldIdx++, SparseTensorFieldKind::ValMemRef, kInvalidLevel,
-                 DimLevelType::Undef)))
+                 LevelType::Undef)))
     return;
   // Put metadata at the end.
   if (!(callback(fieldIdx++, SparseTensorFieldKind::StorageSpec, kInvalidLevel,
-                 DimLevelType::Undef)))
+                 LevelType::Undef)))
     return;
 }
 
 void sparse_tensor::foreachFieldAndTypeInSparseTensor(
     SparseTensorType stt,
     llvm::function_ref<bool(Type, FieldIndex, SparseTensorFieldKind, Level,
-                            DimLevelType)>
+                            LevelType)>
         callback) {
   assert(stt.hasEncoding());
   // Construct the basic types.
@@ -110,28 +110,28 @@ void sparse_tensor::foreachFieldAndTypeInSparseTensor(
   // memref<? x eltType> values
   const Type valMemType = MemRefType::get({ShapedType::kDynamic}, eltType);
 
-  StorageLayout(stt).foreachField(
-      [specType, posMemType, crdMemType, valMemType,
-       callback](FieldIndex fieldIdx, SparseTensorFieldKind fieldKind,
-                 Level lvl, DimLevelType lt) -> bool {
-        switch (fieldKind) {
-        case SparseTensorFieldKind::StorageSpec:
-          return callback(specType, fieldIdx, fieldKind, lvl, lt);
-        case SparseTensorFieldKind::PosMemRef:
-          return callback(posMemType, fieldIdx, fieldKind, lvl, lt);
-        case SparseTensorFieldKind::CrdMemRef:
-          return callback(crdMemType, fieldIdx, fieldKind, lvl, lt);
-        case SparseTensorFieldKind::ValMemRef:
-          return callback(valMemType, fieldIdx, fieldKind, lvl, lt);
-        };
-        llvm_unreachable("unrecognized field kind");
-      });
+  StorageLayout(stt).foreachField([specType, posMemType, crdMemType, valMemType,
+                                   callback](FieldIndex fieldIdx,
+                                             SparseTensorFieldKind fieldKind,
+                                             Level lvl, LevelType lt) -> bool {
+    switch (fieldKind) {
+    case SparseTensorFieldKind::StorageSpec:
+      return callback(specType, fieldIdx, fieldKind, lvl, lt);
+    case SparseTensorFieldKind::PosMemRef:
+      return callback(posMemType, fieldIdx, fieldKind, lvl, lt);
+    case SparseTensorFieldKind::CrdMemRef:
+      return callback(crdMemType, fieldIdx, fieldKind, lvl, lt);
+    case SparseTensorFieldKind::ValMemRef:
+      return callback(valMemType, fieldIdx, fieldKind, lvl, lt);
+    };
+    llvm_unreachable("unrecognized field kind");
+  });
 }
 
 unsigned StorageLayout::getNumFields() const {
   unsigned numFields = 0;
   foreachField([&numFields](FieldIndex, SparseTensorFieldKind, Level,
-                            DimLevelType) -> bool {
+                            LevelType) -> bool {
     numFields++;
     return true;
   });
@@ -141,7 +141,7 @@ unsigned StorageLayout::getNumFields() const {
 unsigned StorageLayout::getNumDataFields() const {
   unsigned numFields = 0; // one value memref
   foreachField([&numFields](FieldIndex fidx, SparseTensorFieldKind, Level,
-                            DimLevelType) -> bool {
+                            LevelType) -> bool {
     if (fidx >= kDataFieldStartingIdx)
       numFields++;
     return true;
@@ -167,7 +167,7 @@ StorageLayout::getFieldIndexAndStride(SparseTensorFieldKind kind,
   }
   foreachField([lvl, kind, &fieldIdx](FieldIndex fIdx,
                                       SparseTensorFieldKind fKind, Level fLvl,
-                                      DimLevelType lt) -> bool {
+                                      LevelType lt) -> bool {
     if ((lvl && fLvl == lvl.value() && kind == fKind) ||
         (kind == fKind && fKind == SparseTensorFieldKind::ValMemRef)) {
       fieldIdx = fIdx;
@@ -343,9 +343,9 @@ Level SparseTensorEncodingAttr::getLvlRank() const {
   return getLvlTypes().size();
 }
 
-DimLevelType SparseTensorEncodingAttr::getLvlType(Level l) const {
+LevelType SparseTensorEncodingAttr::getLvlType(Level l) const {
   if (!getImpl())
-    return DimLevelType::Dense;
+    return LevelType::Dense;
   assert(l < getLvlRank() && "Level is out of bounds");
   return getLvlTypes()[l];
 }
@@ -469,7 +469,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
     return {};
 
   // Process the data from the parsed dictionary value into struct-like data.
-  SmallVector<DimLevelType> lvlTypes;
+  SmallVector<LevelType> lvlTypes;
   SmallVector<SparseTensorDimSliceAttr> dimSlices;
   AffineMap dimToLvl = {};
   AffineMap lvlToDim = {};
@@ -621,9 +621,8 @@ void SparseTensorEncodingAttr::printDimensions(
   }
 }
 
-void SparseTensorEncodingAttr::printLevels(
-    AffineMap &map, AsmPrinter &printer,
-    ArrayRef<DimLevelType> lvlTypes) const {
+void SparseTensorEncodingAttr::printLevels(AffineMap &map, AsmPrinter &printer,
+                                           ArrayRef<LevelType> lvlTypes) const {
   for (unsigned i = 0, n = map.getNumResults() - 1; i < n; i++) {
     map.getResult(i).print(printer.getStream());
     printer << " : " << toMLIRString(lvlTypes[i]) << ", ";
@@ -635,12 +634,10 @@ void SparseTensorEncodingAttr::printLevels(
   }
 }
 
-LogicalResult
-SparseTensorEncodingAttr::verify(function_ref<InFlightDiagnostic()> emitError,
-                                 ArrayRef<DimLevelType> lvlTypes,
-                                 AffineMap dimToLvl, AffineMap lvlToDim,
-                                 unsigned posWidth, unsigned crdWidth,
-                                 ArrayRef<SparseTensorDimSliceAttr> dimSlices) {
+LogicalResult SparseTensorEncodingAttr::verify(
+    function_ref<InFlightDiagnostic()> emitError, ArrayRef<LevelType> lvlTypes,
+    AffineMap dimToLvl, AffineMap lvlToDim, unsigned posWidth,
+    unsigned crdWidth, ArrayRef<SparseTensorDimSliceAttr> dimSlices) {
   if (!acceptBitWidth(posWidth))
     return emitError() << "unexpected position bitwidth: " << posWidth;
   if (!acceptBitWidth(crdWidth))
@@ -652,7 +649,7 @@ SparseTensorEncodingAttr::verify(function_ref<InFlightDiagnostic()> emitError,
       return emitError() << "expected compressed or loose_compressed level "
                             "before singleton level";
     if (!std::all_of(it, lvlTypes.end(),
-                     [](DimLevelType i) { return isSingletonLT(i); }))
+                     [](LevelType i) { return isSingletonLT(i); }))
       return emitError() << "expected all singleton lvlTypes "
                             "following a singleton level";
   }
@@ -891,7 +888,7 @@ RankedTensorType sparse_tensor::getCOOFromTypeWithOrdering(RankedTensorType rtt,
                                                            bool ordered) {
   const SparseTensorType src(rtt);
   const Level lvlRank = src.getLvlRank();
-  SmallVector<DimLevelType> lvlTypes;
+  SmallVector<LevelType> lvlTypes;
   lvlTypes.reserve(lvlRank);
 
   // An unordered and non-unique compressed level at beginning.
@@ -960,7 +957,7 @@ Level mlir::sparse_tensor::toStoredDim(SparseTensorEncodingAttr enc,
 /// irrelevant fields that do not alter the sparse tensor memory layout.
 static SparseTensorEncodingAttr
 getNormalizedEncodingForSpecifier(SparseTensorEncodingAttr enc) {
-  SmallVector<DimLevelType> lts;
+  SmallVector<LevelType> lts;
   for (auto lt : enc.getLvlTypes())
     lts.push_back(*buildLevelType(*getLevelFormat(lt), true, true));
 
@@ -1070,7 +1067,7 @@ static LogicalResult verifyPackUnPack(Operation *op, bool requiresStaticShape,
   bool misMatch = false;
   layout.foreachField([&idx, &misMatch, stt, valTp,
                        lvlTps](FieldIndex fid, SparseTensorFieldKind fKind,
-                               Level lvl, DimLevelType lt) -> bool {
+                               Level lvl, LevelType lt) -> bool {
     if (fKind == SparseTensorFieldKind::StorageSpec)
       return true;
 
@@ -1301,8 +1298,8 @@ void ReinterpretMapOp::build(OpBuilder &odsBuilder, OperationState &odsState,
 LogicalResult ReinterpretMapOp::verify() {
   auto srcStt = getSparseTensorType(getSource());
   auto dstStt = getSparseTensorType(getDest());
-  ArrayRef<DimLevelType> srcLvlTps = srcStt.getLvlTypes();
-  ArrayRef<DimLevelType> dstLvlTps = dstStt.getLvlTypes();
+  ArrayRef<LevelType> srcLvlTps = srcStt.getLvlTypes();
+  ArrayRef<LevelType> dstLvlTps = dstStt.getLvlTypes();
 
   if (srcLvlTps.size() != dstLvlTps.size())
     return emitError("Level rank mismatch between source/dest tensors");
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
index c2f036c3876be3a..7e825dde27830bf 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
@@ -77,10 +77,10 @@ class CodegenEnv {
   const TensorExp &exp(ExprId e) const { return latticeMerger.exp(e); }
   const LatPoint &lat(LatPointId l) const { return latticeMerger.lat(l); }
   ArrayRef<LatPointId> set(LatSetId s) const { return latticeMerger.set(s); }
-  DimLevelType lt(TensorId t, LoopId i) const {
+  LevelType lt(TensorId t, LoopId i) const {
     return latticeMerger.getLvlType(t, i);
   }
-  DimLevelType lt(TensorLoopId b) const { return latticeMerger.getLvlType(b); }
+  LevelType lt(TensorLoopId b) const { return latticeMerger.getLvlType(b); }
 
   unsigned getLoopNum() const { return latticeMerger.getNumLoops(); }
 
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index cb0acdd2be9f7b0..910b605731e2573 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -428,8 +428,8 @@ inline Value constantPrimaryTypeEncoding(OpBuilder &builder, Location loc,
 }
 
 /// Generates a constant of the internal dimension level type encoding.
-inline Value constantDimLevelTypeEncoding(OpBuilder &builder, Location loc,
-                                          DimLevelType lt) {
+inline Value constantLevelTypeEncoding(OpBuilder &builder, Location loc,
+                                       LevelType lt) {
   return constantI8(builder, loc, static_cast<uint8_t>(lt));
 }
 
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
index f8bcc0fe12a1093..dd8091a4baa9689 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -295,7 +295,7 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
   // tensors array (len == numManifestTensor).
   this->tensors.assign(ts.begin(), ts.end());
   // Arrays with len == numTensor.
-  this->lvlTypes.assign(numTensors, std::vector<DimLevelType>());
+  this->lvlTypes.assign(numTensors, std::vector<LevelType>());
   this->lvlSizes.assign(numTensors, std::vector<Value>());
   this->highs.assign(numTensors, std::vector<Value>());
   this->segHi.assign(numTensors, std::vector<Value>());
@@ -330,7 +330,7 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
       // to the total number of loops (each level can potentially be mapped to
       // one of the loop being generated).
       lvlRank = numLoops;
-      lvlTypes[tid].assign(lvlRank, DimLevelType::Dense);
+      lvlTypes[tid].assign(lvlRank, LevelType::Dense);
     } else {
       const Value t = tensors[tid];
       // a scalar or 0-dimension tensors
@@ -349,7 +349,7 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
         for (auto lvlTp : enc.getLvlTypes())
           lvlTypes[tid].push_back(lvlTp);
       } else {
-        lvlTypes[tid].assign(lvlRank, DimLevelType::Dense);
+        lvlTypes[tid].assign(lvlRank, LevelType::Dense);
       }
     }
 
@@ -2072,7 +2072,7 @@ bool LoopEmitter::genSliceBegin(OpBuilder &builder, Location loc, TensorId tid,
 
   // Only when the level is sorted, the next-non-empty slice can be computed
   // efficiently.
-  const DimLevelType lvlType = lvlTypes[tid][lvl];
+  const LevelType lvlType = lvlTypes[tid][lvl];
   assert(isOrderedLT(lvlType));
   if (isSingletonLT(lvlType)) {
     llvm_unreachable("TODO: dense level should be easy to support, while "
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
index 320b39765dea4a7..e3e620b92257a85 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
@@ -676,7 +676,7 @@ class LoopEmitter {
   /// Input and (optional) output tensors.
   std::vector<Value> tensors;
   /// Level-types for each `(TensorId, Level)` pair.
-  std::vector<std::vector<DimLevelType>> lvlTypes;
+  std::vector<std::vector<LevelType>> lvlTypes;
   // Sparse iteration information for each `(TensorId, Level)` pair.
   // These arrays are updated to remain current within the current loop.
   // TODO: Clarify which of these are indexed by dstLvl vs srcLvl.
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 90fd3e0d8da199e..e9062b49435f5b7 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -216,7 +216,7 @@ static void createAllocFields(OpBuilder &builder, Location loc,
       stt,
       [&builder, &fields, stt, loc, posHeuristic, crdHeuristic, valHeuristic,
        enableInit](Type fType, FieldIndex fIdx, SparseTensorFieldKind fKind,
-                   Level /*lvl*/, DimLevelType /*lt*/) -> bool {
+                   Level /*lvl*/, LevelType /*lt*/) -> bool {
         assert(fields.size() == fIdx);
         Value field;
         switch (fKind) {
@@ -1155,7 +1155,7 @@ class SparseConvertConverter : public OpConversionPattern<ConvertOp> {
         SparseTensorType(cast<RankedTensorType>(op.getResult().getType())),
         [&rewriter, &fields, srcDesc,
          loc](Type fTp, FieldIndex fIdx, SparseTensorFieldKind fKind, Level lvl,
-              DimLevelType /*lt*/) -> bool {
+              LevelType /*lt*/) -> bool {
           // Simply reuses the storage specifier as it is an SSA value.
           if (fKind == SparseTensorFieldKind::StorageSpec) {
             fields.push_back(srcDesc.getSpecifier());
@@ -1284,7 +1284,7 @@ struct SparseAssembleOpConverter : public OpConversionPattern<AssembleOp> {
         stt,
         [&rewriter, &fields, &op, &stt,
          loc](Type fType, FieldIndex fIdx, SparseTensorFieldKind fKind,
-              Level /*lvl*/, DimLevelType lt) -> bool {
+              Level /*lvl*/, LevelType lt) -> bool {
           assert(fields.size() == fIdx);
           if (fKind == SparseTensorFieldKind::StorageSpec) {
             fields.push_back(
@@ -1333,7 +1333,7 @@ struct SparseAssembleOpConverter : public OpConversionPattern<AssembleOp> {
         continue;
 
       // Sets up the memory size by reading the last value in position array.
-      DimLevelType lt = stt.getLvlType(lvl);
+      LevelType lt = stt.getLvlType(lvl);
       // Simply forwards the position index when this is a dense level.
       if (isDenseLT(lt)) {
         memSize = rewriter.create<arith::MulIOp>(loc, lvlSize, memSize);
@@ -1387,10 +1387,10 @@ struct SparseDisassembleOpConverter
     Location loc = op.getLoc();
     SmallVector<Value> retMem;
     SmallVector<Value> retLen;
-    desc.getLayout().foreachField([desc, loc, &rewriter, &op, &retMem, &retLen](
-                                      FieldIndex fid,
-                                      SparseTensorFieldKind fKind, Level lvl,
-                                      DimLevelType lt) -> bool {
+    desc.getLayout().foreachField([desc, loc, &rewriter, &op, &retMem,
+                                   &retLen](FieldIndex fid,
+                                            SparseTensorFieldKind fKind,
+                                            Level lvl, LevelType lt) -> bool {
       if (fKind == SparseTensorFieldKind::StorageSpec)
         return true;
       SparseTensorType stt(desc.getRankedTensorType());
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index abee22bab8e84ff..e6052f2ca894c2d 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -146,7 +146,7 @@ static Value genLvlTypesBuffer(OpBuilder &builder, Location loc,
   SmallVector<Value> lvlTypes;
   lvlTypes.reserve(stt.getLvlRank());
   for (const auto lt : stt.getEncoding().getLvlTypes())
-    lvlTypes.push_back(constantDimLevelTypeEncoding(builder, loc, lt));
+    lvlTypes.push_back(constantLevelTypeEncoding(builder, loc, lt));
   return allocaBuffer(builder, loc, lvlTypes);
 }
 
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp
index 6b08563dee27fd7..1c6d7bebe37e46c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp
@@ -42,7 +42,7 @@ convertSparseTensorType(RankedTensorType rtp, SmallVectorImpl<Type> &fields) {
       stt,
       [&fields](Type fieldType, FieldIndex fieldIdx,
                 SparseTensorFieldKind /*fieldKind*/, Level /*lvl*/,
-                DimLevelType /*lt*/) -> bool {
+                LevelType /*lt*/) -> bool {
         assert(fieldIdx == fields.size());
         fields.push_back(fieldType);
         return true;
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 5374ab55c5c0d9f..ca3d336fcf1843b 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -45,9 +45,8 @@ static bool isZeroValue(Value val) {
 // Helper to detect a sparse tensor type operand.
 static bool isSparseTensor(Value v) {
   auto enc = getSparseTensorEncoding(v.getType());
-  return enc && !llvm::all_of(enc.getLvlTypes(), [](auto lt) {
-           return lt == DimLevelType::Dense;
-         });
+  return enc && !llvm::all_of(enc.getLvlTypes(),
+                              [](auto lt) { return lt == LevelType::Dense; });
 }
 static bool isSparseTensor(OpOperand *op) { return isSparseTensor(op->get()); }
 
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index c793f012bd8ba77..3fb90ef379a5778 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -79,7 +79,7 @@ static bool isInvariantAffine(AffineExpr a, unsigned loopDepth, LoopId ldx,
 /// same index is used more than once. Also rejects compound affine
 /// expressions in sparse dimensions.
 static bool findAffine(Merger &merger, TensorId tid, Level lvl, AffineExpr a,
-                       DimLevelType lt, bool setLvlFormat = true) {
+                       LevelType lt, bool setLvlFormat = true) {
   switch (a.getKind()) {
   case AffineExprKind::DimId: {
     const LoopId idx = merger.makeLoopId(cast<AffineDimExpr>(a).getPosition());
@@ -125,7 +125,7 @@ static bool findAffine(Merger &merger, TensorId tid, Level lvl, AffineExpr a,
 ///
 /// TODO: constant should be easy to handle.
 static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
-                          AffineExpr a, DimLevelType lt, bool isSubExp = false,
+                          AffineExpr a, LevelType lt, bool isSubExp = false,
                           int64_t coefficient = 1) {
   switch (a.getKind()) {
   case AffineExprKind::DimId: {
@@ -275,7 +275,7 @@ static bool findSparseAnnotations(CodegenEnv &env, bool idxReducBased) {
     // to be sliced.
     for (Level l = 0; l < lvlRank; l++) {
       const AffineExpr a = map.getResult(l);
-      const DimLevelType lt = enc.getLvlType(l);
+      const LevelType lt = enc.getLvlType(l);
       if (idxReducBased && needIdxReduc) {
         if (!findDepIdxSet(env.merger(), tid, l, a, lt))
           return false; // inadmissible affine expression
@@ -883,8 +883,8 @@ static scf::IfOp genIf(CodegenEnv &env, OpBuilder &builder, LoopId ldx,
   Value cond;
   env.merger().foreachTensorLoopId(
       p, /*simple=*/true,
-      [&](TensorLoopId b, TensorId tid, std::optional<Level> lvl,
-          DimLevelType lt, bool isIdxRed) {
+      [&](TensorLoopId b, TensorId tid, std::optional<Level> lvl, LevelType lt,
+          bool isIdxRed) {
         if (isIdxRed) {
           // Since there is no 1:1 mapping from loop to level (multiple loops
           // are required to resolve one level with non-trivial index
@@ -970,7 +970,7 @@ static bool startLoopSeq(CodegenEnv &env, OpBuilder &builder, ExprId exp,
   SmallVector<TensorLevel> tidLvls;
   env.merger().foreachTensorLoopId(l0, [&](TensorLoopId b, TensorId tid,
                                            std::optional<Level> lvl,
-                                           DimLevelType lt, bool isIdxReduc) {
+                                           LevelType lt, bool isIdxReduc) {
     assert(env.merger().loop(b) == idx);
     if (isDenseLT(lt) || isUndefLT(lt)) {
       if (tid == env.merger().getSynTensorID()) {
@@ -1048,89 +1048,89 @@ static bool translateBitsToTidLvlPairs(
 
   unsigned numloopCond = 0;
   bool hasNonUnique = false;
-  env.merger().foreachTensorLoopId(
-      li, [&, ldx](TensorLoopId b, TensorId tid, std::optional<Level> lvl,
-                   DimLevelType lt, bool isIdxReduc) {
-        if (simple[b]) {
-          if (isIdxReduc) {
-            tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
-            numloopCond++;
-            return;
-          }
-          if (isUndefLT(lt)) {
-            // An undefined lt in the lattices, we probably mean to
-            // iterate based on the level of output tensor.  E.g., this
-            // could be a synthetic tensor (for invariants and sparse
-            // output tensor).
-            auto itType = env.op().getIteratorTypesArray()[ldx];
-            if (linalg::isReductionIterator(itType) &&
-                env.merger().getSynTensorID() == tid) {
-              // Coiterating with an invariant, and this is a reduction loop
-              // e.g., out = prod(in[i][j] op invariant);
-              // In this case, we can not infer the loop bound from output
-              // (whose level is reduced). Instead we use the synthetic tensor
-              // to infer the bound.
-              // The level of the synthetic tensor is the current loop depth;
-              // the rank of the synthetic tensor equals to number of loops.
-              lvl = env.emitter().getCurrentDepth();
-            } else {
-              // or a broadcast
-              // out[i][j] = in[i] (j is undef for input)
-              tid = outTid;
-              lvl = outLvl;
-              // Skips invalid lvl (e.g., when this is a zero ranked tensor).
-              if (!lvl)
-                return;
-            }
-          }
-          hasNonUnique = !isUniqueLT(lt) || hasNonUnique;
-          tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
-          numloopCond++;
-        } else if (isDenseLT(lt) || isIdxReduc) {
-          tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
+  env.merger().foreachTensorLoopId(li, [&, ldx](TensorLoopId b, TensorId tid,
+                                                std::optional<Level> lvl,
+                                                LevelType lt, bool isIdxReduc) {
+    if (simple[b]) {
+      if (isIdxReduc) {
+        tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
+        numloopCond++;
+        return;
+      }
+      if (isUndefLT(lt)) {
+        // An undefined lt in the lattices means we probably intend to
+        // iterate based on the level of the output tensor. E.g., this
+        // could be a synthetic tensor (for invariants and sparse
+        // output tensors).
+        auto itType = env.op().getIteratorTypesArray()[ldx];
+        if (linalg::isReductionIterator(itType) &&
+            env.merger().getSynTensorID() == tid) {
+          // Coiterating with an invariant, and this is a reduction loop,
+          // e.g., out = prod(in[i][j] op invariant);
+          // In this case, we cannot infer the loop bound from the output
+          // (whose level is reduced). Instead we use the synthetic tensor
+          // to infer the bound.
+          // The level of the synthetic tensor is the current loop depth;
+          // the rank of the synthetic tensor equals the number of loops.
+          lvl = env.emitter().getCurrentDepth();
         } else {
-          assert(isUndefLT(lt));
-          linalg::GenericOp op = env.op();
-          if (tid >= op.getNumDpsInputs())
-            // We only handle affine expression on input tensors (for now).
-            return;
-          OpOperand *operand = &op->getOpOperand(tid);
-          const auto stt = getSparseTensorType(operand->get());
-          // Non-annotated dense tensors requires no special handling.
-          if (!stt.hasEncoding())
+          // or a broadcast
+          // out[i][j] = in[i] (j is undef for input)
+          tid = outTid;
+          lvl = outLvl;
+          // Skips invalid lvl (e.g., when this is a zero-ranked tensor).
+          if (!lvl)
             return;
-
-          ArrayRef<AffineExpr> affines =
-              op.getMatchingIndexingMap(operand).getResults();
-          const Level lvlRank = stt.getLvlRank();
-          assert(affines.size() == static_cast<size_t>(lvlRank));
-          for (Level l = 0; l < lvlRank; l++) {
-            AffineExpr exp = affines[l];
-            // Skip simple affine expression and non-dense levels (which
-            // have their own filter loop).
-            if (isa<AffineDimExpr>(exp) || !stt.isDenseLvl(l))
-              continue;
-
-            // Constant affine expression are handled in genLoop
-            if (!isa<AffineConstantExpr>(exp)) {
-              bool isAtLoop = false;
-              if (isInvariantAffine(exp, env.getLoopDepth(), ldx, isAtLoop) &&
-                  isAtLoop) {
-                // If the compound affine is invariant and we are right at the
-                // level. We need to generate the address according to the
-                // affine expression. This is also the best place we can do it
-                // to avoid putting it inside inner loops.
-                // NOTE: It assumes that the levels of the input tensor are
-                // initialized in order (and it is also currently guaranteed by
-                // computeIterationGraph), another more admissible approach
-                // might be accepting out-of-order access between consecutive
-                // dense levels.
-                affineTidLvls.emplace_back(env.makeTensorLevel(tid, l), exp);
-              }
-            }
+        }
+      }
+      hasNonUnique = !isUniqueLT(lt) || hasNonUnique;
+      tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
+      numloopCond++;
+    } else if (isDenseLT(lt) || isIdxReduc) {
+      tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
+    } else {
+      assert(isUndefLT(lt));
+      linalg::GenericOp op = env.op();
+      if (tid >= op.getNumDpsInputs())
+        // We only handle affine expression on input tensors (for now).
+        return;
+      OpOperand *operand = &op->getOpOperand(tid);
+      const auto stt = getSparseTensorType(operand->get());
+      // Non-annotated dense tensors require no special handling.
+      if (!stt.hasEncoding())
+        return;
+
+      ArrayRef<AffineExpr> affines =
+          op.getMatchingIndexingMap(operand).getResults();
+      const Level lvlRank = stt.getLvlRank();
+      assert(affines.size() == static_cast<size_t>(lvlRank));
+      for (Level l = 0; l < lvlRank; l++) {
+        AffineExpr exp = affines[l];
+        // Skip simple affine expressions and non-dense levels (which
+        // have their own filter loop).
+        if (isa<AffineDimExpr>(exp) || !stt.isDenseLvl(l))
+          continue;
+
+        // Constant affine expressions are handled in genLoop.
+        if (!isa<AffineConstantExpr>(exp)) {
+          bool isAtLoop = false;
+          if (isInvariantAffine(exp, env.getLoopDepth(), ldx, isAtLoop) &&
+              isAtLoop) {
+            // If the compound affine expression is invariant and we are
+            // right at the level, we need to generate the address according
+            // to the affine expression. This is also the best place to do
+            // so, since it avoids putting the computation inside inner loops.
+            // NOTE: This assumes that the levels of the input tensor are
+            // initialized in order (which is currently guaranteed by
+            // computeIterationGraph); a more permissive approach might
+            // accept out-of-order accesses between consecutive dense
+            // levels.
+            affineTidLvls.emplace_back(env.makeTensorLevel(tid, l), exp);
           }
         }
-      });
+      }
+    }
+  });
 
   if (isDenseLT(env.lt(outTid, ldx))) {
     // Note that we generate dense indices of the output tensor
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
index 2cfb423f0f81db1..6cdf5f8c0168be2 100644
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -226,8 +226,7 @@ Merger::Merger(unsigned numInputOutputTensors, unsigned numLoops,
       syntheticTensor(numInputOutputTensors),
       numTensors(numInputOutputTensors + 1), numLoops(numLoops),
       hasSparseOut(false),
-      lvlTypes(numTensors,
-               std::vector<DimLevelType>(numLoops, DimLevelType::Undef)),
+      lvlTypes(numTensors, std::vector<LevelType>(numLoops, LevelType::Undef)),
       loopToLvl(numTensors,
                 std::vector<std::optional<Level>>(numLoops, std::nullopt)),
       lvlToLoop(numTensors,
diff --git a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
index ea7e3125b7f47d9..7f8f76f8ec18901 100644
--- a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
@@ -19,7 +19,7 @@ using namespace mlir::sparse_tensor;
 
 SparseTensorStorageBase::SparseTensorStorageBase( // NOLINT
     uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
-    const uint64_t *lvlSizes, const DimLevelType *lvlTypes,
+    const uint64_t *lvlSizes, const LevelType *lvlTypes,
     const uint64_t *dim2lvl, const uint64_t *lvl2dim)
     : dimSizes(dimSizes, dimSizes + dimRank),
       lvlSizes(lvlSizes, lvlSizes + lvlRank),
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index f84fdd3964c14f1..2dcc5d22e2291f0 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -173,7 +173,7 @@ static_assert(std::is_same<index_type, uint64_t>::value,
 void *_mlir_ciface_newSparseTensor( // NOLINT
     StridedMemRefType<index_type, 1> *dimSizesRef,
     StridedMemRefType<index_type, 1> *lvlSizesRef,
-    StridedMemRefType<DimLevelType, 1> *lvlTypesRef,
+    StridedMemRefType<LevelType, 1> *lvlTypesRef,
     StridedMemRefType<index_type, 1> *dim2lvlRef,
     StridedMemRefType<index_type, 1> *lvl2dimRef, OverheadType posTp,
     OverheadType crdTp, PrimaryType valTp, Action action, void *ptr) {
@@ -189,7 +189,7 @@ void *_mlir_ciface_newSparseTensor( // NOLINT
   ASSERT_USIZE_EQ(lvl2dimRef, dimRank);
   const index_type *dimSizes = MEMREF_GET_PAYLOAD(dimSizesRef);
   const index_type *lvlSizes = MEMREF_GET_PAYLOAD(lvlSizesRef);
-  const DimLevelType *lvlTypes = MEMREF_GET_PAYLOAD(lvlTypesRef);
+  const LevelType *lvlTypes = MEMREF_GET_PAYLOAD(lvlTypesRef);
   const index_type *dim2lvl = MEMREF_GET_PAYLOAD(dim2lvlRef);
   const index_type *lvl2dim = MEMREF_GET_PAYLOAD(lvl2dimRef);
 
diff --git a/mlir/test/CAPI/sparse_tensor.c b/mlir/test/CAPI/sparse_tensor.c
index 3bd1508cf299a3d..b0bc9bb6e881a52 100644
--- a/mlir/test/CAPI/sparse_tensor.c
+++ b/mlir/test/CAPI/sparse_tensor.c
@@ -43,8 +43,8 @@ static int testRoundtripEncoding(MlirContext ctx) {
   MlirAffineMap lvlToDim =
       mlirSparseTensorEncodingAttrGetLvlToDim(originalAttr);
   int lvlRank = mlirSparseTensorEncodingGetLvlRank(originalAttr);
-  enum MlirSparseTensorDimLevelType *lvlTypes =
-      malloc(sizeof(enum MlirSparseTensorDimLevelType) * lvlRank);
+  enum MlirSparseTensorLevelType *lvlTypes =
+      malloc(sizeof(enum MlirSparseTensorLevelType) * lvlRank);
   for (int l = 0; l < lvlRank; ++l) {
     lvlTypes[l] = mlirSparseTensorEncodingAttrGetLvlType(originalAttr, l);
     fprintf(stderr, "level_type: %d\n", lvlTypes[l]);
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
index 86d0c18362d69fd..65a37bc8e731e56 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
@@ -53,7 +53,7 @@ module {
   }
 
   //
-  // The first test suite (for non-singleton DimLevelTypes).
+  // The first test suite (for non-singleton LevelTypes).
   //
   func.func @entry() {
     //
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir
index be6c4d43413f1b3..2ace317554a070e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir
@@ -72,7 +72,7 @@ module {
   }
 
   //
-  // The first test suite (for non-singleton DimLevelTypes).
+  // The first test suite (for non-singleton LevelTypes).
   //
   func.func @testNonSingleton() {
     //
@@ -125,7 +125,7 @@ module {
   }
 
   //
-  // The second test suite (for singleton DimLevelTypes).
+  // The second test suite (for singleton LevelTypes).
   //
   func.func @testSingleton() {
     //
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
index 166140468c8609a..199777c79ef8388 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
@@ -140,11 +140,11 @@ def main():
         # straightforward to adapt the code below to explore more combinations.
         # For these simple orderings, dim2lvl and lvl2dim are the same.
         levels = [
-            [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
-            [st.DimLevelType.dense, st.DimLevelType.dense],
-            [st.DimLevelType.dense, st.DimLevelType.compressed],
-            [st.DimLevelType.compressed, st.DimLevelType.dense],
-            [st.DimLevelType.compressed, st.DimLevelType.compressed],
+            [st.LevelType.compressed_nu, st.LevelType.singleton],
+            [st.LevelType.dense, st.LevelType.dense],
+            [st.LevelType.dense, st.LevelType.compressed],
+            [st.LevelType.compressed, st.LevelType.dense],
+            [st.LevelType.compressed, st.LevelType.compressed],
         ]
         orderings = [
             ir.AffineMap.get_permutation([0, 1]),
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
index 4a1eab0ba0b048b..0aa4f92a7bf4efc 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
@@ -126,11 +126,11 @@ def main():
         e = False
         opt = f"parallelization-strategy=none"
         levels = [
-            [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
-            [st.DimLevelType.dense, st.DimLevelType.dense],
-            [st.DimLevelType.dense, st.DimLevelType.compressed],
-            [st.DimLevelType.compressed, st.DimLevelType.dense],
-            [st.DimLevelType.compressed, st.DimLevelType.compressed],
+            [st.LevelType.compressed_nu, st.LevelType.singleton],
+            [st.LevelType.dense, st.LevelType.dense],
+            [st.LevelType.dense, st.LevelType.compressed],
+            [st.LevelType.compressed, st.LevelType.dense],
+            [st.LevelType.compressed, st.LevelType.compressed],
         ]
         orderings = [
             ir.AffineMap.get_permutation([0, 1]),
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
index 62d1e58eda048b2..d994e8d0a8a19df 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
@@ -125,10 +125,10 @@ def main():
         # regular and loose compression and various metadata bitwidths.
         # For these simple orderings, dim2lvl and lvl2dim are the same.
         levels = [
-            [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
-            [st.DimLevelType.dense, st.DimLevelType.compressed],
-            [st.DimLevelType.dense, st.DimLevelType.loose_compressed],
-            [st.DimLevelType.compressed, st.DimLevelType.compressed],
+            [st.LevelType.compressed_nu, st.LevelType.singleton],
+            [st.LevelType.dense, st.LevelType.compressed],
+            [st.LevelType.dense, st.LevelType.loose_compressed],
+            [st.LevelType.compressed, st.LevelType.compressed],
         ]
         orderings = [
             (ir.AffineMap.get_permutation([0, 1]), 0),
@@ -149,10 +149,10 @@ def main():
 
         # Now do the same for BSR.
         level = [
-            st.DimLevelType.dense,
-            st.DimLevelType.compressed,
-            st.DimLevelType.dense,
-            st.DimLevelType.dense,
+            st.LevelType.dense,
+            st.LevelType.compressed,
+            st.LevelType.dense,
+            st.LevelType.dense,
         ]
         d0 = ir.AffineDimExpr.get(0)
         d1 = ir.AffineDimExpr.get(1)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
index 01522ff64558c17..82b7d885519fc0d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
@@ -205,7 +205,7 @@ def main():
         levels = list(
             itertools.product(
                 *itertools.repeat(
-                    [st.DimLevelType.dense, st.DimLevelType.compressed], rank
+                    [st.LevelType.dense, st.LevelType.compressed], rank
                 )
             )
         )
diff --git a/mlir/test/python/dialects/sparse_tensor/dialect.py b/mlir/test/python/dialects/sparse_tensor/dialect.py
index fe7b41e536e2763..88a5595d75aea92 100644
--- a/mlir/test/python/dialects/sparse_tensor/dialect.py
+++ b/mlir/test/python/dialects/sparse_tensor/dialect.py
@@ -28,7 +28,7 @@ def testEncodingAttr1D():
         # CHECK: equal: True
         print(f"equal: {casted == parsed}")
 
-        # CHECK: lvl_types: [<DimLevelType.compressed: 8>]
+        # CHECK: lvl_types: [<LevelType.compressed: 8>]
         print(f"lvl_types: {casted.lvl_types}")
         # CHECK: dim_to_lvl: (d0) -> (d0)
         print(f"dim_to_lvl: {casted.dim_to_lvl}")
@@ -70,7 +70,7 @@ def testEncodingAttr2D():
         # CHECK: equal: True
         print(f"equal: {casted == parsed}")
 
-        # CHECK: lvl_types: [<DimLevelType.dense: 4>, <DimLevelType.compressed: 8>]
+        # CHECK: lvl_types: [<LevelType.dense: 4>, <LevelType.compressed: 8>]
         print(f"lvl_types: {casted.lvl_types}")
         # CHECK: dim_to_lvl: (d0, d1) -> (d1, d0)
         print(f"dim_to_lvl: {casted.dim_to_lvl}")
diff --git a/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp b/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
index 2a20327c80b74e5..ce9c0e39b31b953 100644
--- a/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
+++ b/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
@@ -313,11 +313,11 @@ class MergerTest3T1L : public MergerTestBase {
   MergerTest3T1L() : MergerTestBase(3, 1) {
     EXPECT_TRUE(merger.getOutTensorID() == tid(2));
     // Tensor 0: sparse input vector.
-    merger.setLevelAndType(tid(0), lid(0), 0, DimLevelType::Compressed);
+    merger.setLevelAndType(tid(0), lid(0), 0, LevelType::Compressed);
     // Tensor 1: sparse input vector.
-    merger.setLevelAndType(tid(1), lid(0), 0, DimLevelType::Compressed);
+    merger.setLevelAndType(tid(1), lid(0), 0, LevelType::Compressed);
     // Tensor 2: dense output vector.
-    merger.setLevelAndType(tid(2), lid(0), 0, DimLevelType::Dense);
+    merger.setLevelAndType(tid(2), lid(0), 0, LevelType::Dense);
   }
 };
 
@@ -327,13 +327,13 @@ class MergerTest4T1L : public MergerTestBase {
   MergerTest4T1L() : MergerTestBase(4, 1) {
     EXPECT_TRUE(merger.getOutTensorID() == tid(3));
     // Tensor 0: sparse input vector.
-    merger.setLevelAndType(tid(0), lid(0), 0, DimLevelType::Compressed);
+    merger.setLevelAndType(tid(0), lid(0), 0, LevelType::Compressed);
     // Tensor 1: sparse input vector.
-    merger.setLevelAndType(tid(1), lid(0), 0, DimLevelType::Compressed);
+    merger.setLevelAndType(tid(1), lid(0), 0, LevelType::Compressed);
     // Tensor 2: sparse input vector
-    merger.setLevelAndType(tid(2), lid(0), 0, DimLevelType::Compressed);
+    merger.setLevelAndType(tid(2), lid(0), 0, LevelType::Compressed);
     // Tensor 3: dense output vector
-    merger.setLevelAndType(tid(3), lid(0), 0, DimLevelType::Dense);
+    merger.setLevelAndType(tid(3), lid(0), 0, LevelType::Dense);
   }
 };
 
@@ -347,11 +347,11 @@ class MergerTest3T1LD : public MergerTestBase {
   MergerTest3T1LD() : MergerTestBase(3, 1) {
     EXPECT_TRUE(merger.getOutTensorID() == tid(2));
     // Tensor 0: sparse input vector.
-    merger.setLevelAndType(tid(0), lid(0), 0, DimLevelType::Compressed);
+    merger.setLevelAndType(tid(0), lid(0), 0, LevelType::Compressed);
     // Tensor 1: dense input vector.
-    merger.setLevelAndType(tid(1), lid(0), 0, DimLevelType::Dense);
+    merger.setLevelAndType(tid(1), lid(0), 0, LevelType::Dense);
     // Tensor 2: dense output vector.
-    merger.setLevelAndType(tid(2), lid(0), 0, DimLevelType::Dense);
+    merger.setLevelAndType(tid(2), lid(0), 0, LevelType::Dense);
   }
 };
 
@@ -365,13 +365,13 @@ class MergerTest4T1LU : public MergerTestBase {
   MergerTest4T1LU() : MergerTestBase(4, 1) {
     EXPECT_TRUE(merger.getOutTensorID() == tid(3));
     // Tensor 0: undef input vector.
-    merger.setLevelAndType(tid(0), lid(0), 0, DimLevelType::Undef);
+    merger.setLevelAndType(tid(0), lid(0), 0, LevelType::Undef);
     // Tensor 1: dense input vector.
-    merger.setLevelAndType(tid(1), lid(0), 0, DimLevelType::Dense);
+    merger.setLevelAndType(tid(1), lid(0), 0, LevelType::Dense);
     // Tensor 2: undef input vector.
-    merger.setLevelAndType(tid(2), lid(0), 0, DimLevelType::Undef);
+    merger.setLevelAndType(tid(2), lid(0), 0, LevelType::Undef);
     // Tensor 3: dense output vector.
-    merger.setLevelAndType(tid(3), lid(0), 0, DimLevelType::Dense);
+    merger.setLevelAndType(tid(3), lid(0), 0, LevelType::Dense);
   }
 };
 
@@ -387,11 +387,11 @@ class MergerTest3T1LSo : public MergerTestBase {
     EXPECT_TRUE(merger.getSynTensorID() == tid(3));
     merger.setHasSparseOut(true);
     // Tensor 0: undef input vector.
-    merger.setLevelAndType(tid(0), lid(0), 0, DimLevelType::Undef);
+    merger.setLevelAndType(tid(0), lid(0), 0, LevelType::Undef);
     // Tensor 1: undef input vector.
-    merger.setLevelAndType(tid(1), lid(0), 0, DimLevelType::Undef);
+    merger.setLevelAndType(tid(1), lid(0), 0, LevelType::Undef);
     // Tensor 2: sparse output vector.
-    merger.setLevelAndType(tid(2), lid(0), 0, DimLevelType::Compressed);
+    merger.setLevelAndType(tid(2), lid(0), 0, LevelType::Compressed);
   }
 };
 


