[Mlir-commits] [mlir] [mlir][sparse] Expand LevelType to 64 bits and implement n out of m (PR #79935)
llvmlistbot at llvm.org
Mon Jan 29 19:04:28 PST 2024
github-actions[bot] wrote:
:warning: The Python code formatter `darker` found issues in your code. :warning:
<details>
<summary>
You can test this locally with the following command:
</summary>
``````````bash
darker --check --diff -r c9a6e993f7b349405b6c8f9244cd9cf0f56a6a81...a91ee4a2701822a50dc048563740a60cc00caf05 mlir/test/python/dialects/sparse_tensor/dialect.py
``````````
</details>
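For reference, a minimal sketch of reproducing this check locally (assuming a Python environment where `darker` can be installed from PyPI, and running from the root of an `llvm-project` checkout that contains both commits in the range):

``````````bash
# Install the darker formatter (applies black-style formatting to changed lines only).
python -m pip install darker

# Re-run the same check the bot ran, against the commit range from this PR.
darker --check --diff \
  -r c9a6e993f7b349405b6c8f9244cd9cf0f56a6a81...a91ee4a2701822a50dc048563740a60cc00caf05 \
  mlir/test/python/dialects/sparse_tensor/dialect.py
``````````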
<details>
<summary>
View the diff from darker here.
</summary>
``````````diff
--- dialect.py 2024-01-30 01:01:53.000000 +0000
+++ dialect.py 2024-01-30 03:04:21.431573 +0000
@@ -11,89 +11,89 @@
# CHECK-LABEL: TEST: testEncodingAttr1D
@run
def testEncodingAttr1D():
- with Context() as ctx:
- parsed = Attribute.parse(
- "#sparse_tensor.encoding<{"
- " map = (d0) -> (d0 : compressed),"
- " posWidth = 16,"
- " crdWidth = 32"
- "}>"
- )
- # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 16, crdWidth = 32 }>
- print(parsed)
+ with Context() as ctx:
+ parsed = Attribute.parse(
+ "#sparse_tensor.encoding<{"
+ " map = (d0) -> (d0 : compressed),"
+ " posWidth = 16,"
+ " crdWidth = 32"
+ "}>"
+ )
+ # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 16, crdWidth = 32 }>
+ print(parsed)
- casted = st.EncodingAttr(parsed)
- # CHECK: equal: True
- print(f"equal: {casted == parsed}")
+ casted = st.EncodingAttr(parsed)
+ # CHECK: equal: True
+ print(f"equal: {casted == parsed}")
- # CHECK: lvl_types: [<LevelType.compressed: 131072>]
- print(f"lvl_types: {casted.lvl_types}")
- # CHECK: dim_to_lvl: (d0) -> (d0)
- print(f"dim_to_lvl: {casted.dim_to_lvl}")
- # CHECK: lvl_to_dim: (d0) -> (d0)
- print(f"lvl_to_dim: {casted.lvl_to_dim}")
- # CHECK: pos_width: 16
- print(f"pos_width: {casted.pos_width}")
- # CHECK: crd_width: 32
- print(f"crd_width: {casted.crd_width}")
+ # CHECK: lvl_types: [<LevelType.compressed: 131072>]
+ print(f"lvl_types: {casted.lvl_types}")
+ # CHECK: dim_to_lvl: (d0) -> (d0)
+ print(f"dim_to_lvl: {casted.dim_to_lvl}")
+ # CHECK: lvl_to_dim: (d0) -> (d0)
+ print(f"lvl_to_dim: {casted.lvl_to_dim}")
+ # CHECK: pos_width: 16
+ print(f"pos_width: {casted.pos_width}")
+ # CHECK: crd_width: 32
+ print(f"crd_width: {casted.crd_width}")
- created = st.EncodingAttr.get(casted.lvl_types, None, None, 0, 0)
- # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
- print(created)
- # CHECK: created_equal: False
- print(f"created_equal: {created == casted}")
+ created = st.EncodingAttr.get(casted.lvl_types, None, None, 0, 0)
+ # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
+ print(created)
+ # CHECK: created_equal: False
+ print(f"created_equal: {created == casted}")
- # Verify that the factory creates an instance of the proper type.
- # CHECK: is_proper_instance: True
- print(f"is_proper_instance: {isinstance(created, st.EncodingAttr)}")
- # CHECK: created_pos_width: 0
- print(f"created_pos_width: {created.pos_width}")
+ # Verify that the factory creates an instance of the proper type.
+ # CHECK: is_proper_instance: True
+ print(f"is_proper_instance: {isinstance(created, st.EncodingAttr)}")
+ # CHECK: created_pos_width: 0
+ print(f"created_pos_width: {created.pos_width}")
# CHECK-LABEL: TEST: testEncodingAttr2D
@run
def testEncodingAttr2D():
- with Context() as ctx:
- parsed = Attribute.parse(
- "#sparse_tensor.encoding<{"
- " map = (d0, d1) -> (d1 : dense, d0 : compressed),"
- " posWidth = 8,"
- " crdWidth = 32"
- "}>"
- )
- # CHECK: #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed), posWidth = 8, crdWidth = 32 }>
- print(parsed)
+ with Context() as ctx:
+ parsed = Attribute.parse(
+ "#sparse_tensor.encoding<{"
+ " map = (d0, d1) -> (d1 : dense, d0 : compressed),"
+ " posWidth = 8,"
+ " crdWidth = 32"
+ "}>"
+ )
+ # CHECK: #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed), posWidth = 8, crdWidth = 32 }>
+ print(parsed)
- casted = st.EncodingAttr(parsed)
- # CHECK: equal: True
- print(f"equal: {casted == parsed}")
+ casted = st.EncodingAttr(parsed)
+ # CHECK: equal: True
+ print(f"equal: {casted == parsed}")
- # CHECK: lvl_types: [<LevelType.dense: 65536>, <LevelType.compressed: 131072>]
- print(f"lvl_types: {casted.lvl_types}")
- # CHECK: dim_to_lvl: (d0, d1) -> (d1, d0)
- print(f"dim_to_lvl: {casted.dim_to_lvl}")
- # CHECK: lvl_to_dim: (d0, d1) -> (d1, d0)
- print(f"lvl_to_dim: {casted.lvl_to_dim}")
- # CHECK: pos_width: 8
- print(f"pos_width: {casted.pos_width}")
- # CHECK: crd_width: 32
- print(f"crd_width: {casted.crd_width}")
+ # CHECK: lvl_types: [<LevelType.dense: 65536>, <LevelType.compressed: 131072>]
+ print(f"lvl_types: {casted.lvl_types}")
+ # CHECK: dim_to_lvl: (d0, d1) -> (d1, d0)
+ print(f"dim_to_lvl: {casted.dim_to_lvl}")
+ # CHECK: lvl_to_dim: (d0, d1) -> (d1, d0)
+ print(f"lvl_to_dim: {casted.lvl_to_dim}")
+ # CHECK: pos_width: 8
+ print(f"pos_width: {casted.pos_width}")
+ # CHECK: crd_width: 32
+ print(f"crd_width: {casted.crd_width}")
- created = st.EncodingAttr.get(
- casted.lvl_types,
- casted.dim_to_lvl,
- casted.lvl_to_dim,
- 8,
- 32,
- )
- # CHECK: #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed), posWidth = 8, crdWidth = 32 }>
- print(created)
- # CHECK: created_equal: True
- print(f"created_equal: {created == casted}")
+ created = st.EncodingAttr.get(
+ casted.lvl_types,
+ casted.dim_to_lvl,
+ casted.lvl_to_dim,
+ 8,
+ 32,
+ )
+ # CHECK: #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed), posWidth = 8, crdWidth = 32 }>
+ print(created)
+ # CHECK: created_equal: True
+ print(f"created_equal: {created == casted}")
# CHECK-LABEL: TEST: testEncodingAttrOnTensorType
@run
def testEncodingAttrOnTensorType():
``````````
</details>
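To apply darker's suggested formatting rather than just report it, the same invocation can be run without `--check --diff` (a sketch under the same assumptions as above), which rewrites only the lines touched by the given revision range:

``````````bash
# Apply darker's formatting directly to the file instead of printing a diff.
darker -r c9a6e993f7b349405b6c8f9244cd9cf0f56a6a81...a91ee4a2701822a50dc048563740a60cc00caf05 \
  mlir/test/python/dialects/sparse_tensor/dialect.py

# Review the resulting changes before committing.
git diff mlir/test/python/dialects/sparse_tensor/dialect.py
``````````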
https://github.com/llvm/llvm-project/pull/79935