[Mlir-commits] [mlir] [mlir][sparse] Use variable instead of inlining sparse encoding (PR #72561)
llvmlistbot at llvm.org
Thu Nov 16 12:24:26 PST 2023
llvmbot wrote:
@llvm/pr-subscribers-mlir
Author: Yinying Li (yinying-lisa-li)
Changes:
Print the sparse tensor encoding through an attribute alias (e.g. `#sparse`) instead of inlining the full encoding into every tensor type, and update the affected tests to match the alias. Example:

```mlir
#CSR = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>

// CHECK: #[[$CSR:.*]] = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
// CHECK-LABEL: func private @sparse_csr(
// CHECK-SAME: tensor<?x?xf32, #[[$CSR]]>)
func.func private @sparse_csr(tensor<?x?xf32, #CSR>)
```
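For contrast, a minimal before/after sketch of the printed IR (the function name `@f` is illustrative, not from the patch):

```mlir
// Before: the encoding was inlined into each tensor type.
func.func private @f(tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>>)

// After: the printer hoists the encoding into a module-level alias and
// refers to it by name at every use.
#sparse = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
func.func private @f(tensor<?x?xf32, #sparse>)
```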
---
Patch is 438.69 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/72561.diff
49 Files Affected:
- (modified) mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp (+15)
- (modified) mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir (+7-7)
- (modified) mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir (+7-7)
- (modified) mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir (+8-8)
- (modified) mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir (+5-5)
- (modified) mlir/test/Dialect/SparseTensor/codegen.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/dense.mlir (+10-10)
- (modified) mlir/test/Dialect/SparseTensor/fold.mlir (+2-2)
- (modified) mlir/test/Dialect/SparseTensor/one_trip.mlir (+4-4)
- (modified) mlir/test/Dialect/SparseTensor/pre_rewriting.mlir (+13-13)
- (modified) mlir/test/Dialect/SparseTensor/rejected.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir (+7-7)
- (modified) mlir/test/Dialect/SparseTensor/roundtrip.mlir (+8-8)
- (modified) mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir (+40-20)
- (modified) mlir/test/Dialect/SparseTensor/semi_ring.mlir (+5-5)
- (modified) mlir/test/Dialect/SparseTensor/sorted_coo.mlir (+20-20)
- (modified) mlir/test/Dialect/SparseTensor/sparse_1d.mlir (+100-100)
- (modified) mlir/test/Dialect/SparseTensor/sparse_2d.mlir (+157-157)
- (modified) mlir/test/Dialect/SparseTensor/sparse_3d.mlir (+95-95)
- (modified) mlir/test/Dialect/SparseTensor/sparse_affine.mlir (+35-35)
- (modified) mlir/test/Dialect/SparseTensor/sparse_concat.mlir (+64-64)
- (modified) mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir (+22-22)
- (modified) mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_foreach.mlir (+3-3)
- (modified) mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir (+48-48)
- (modified) mlir/test/Dialect/SparseTensor/sparse_index.mlir (+23-23)
- (modified) mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir (+44-44)
- (modified) mlir/test/Dialect/SparseTensor/sparse_kernels.mlir (+53-53)
- (modified) mlir/test/Dialect/SparseTensor/sparse_lower.mlir (+4-4)
- (modified) mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir (+4-4)
- (modified) mlir/test/Dialect/SparseTensor/sparse_nd.mlir (+6-6)
- (modified) mlir/test/Dialect/SparseTensor/sparse_out.mlir (+98-98)
- (modified) mlir/test/Dialect/SparseTensor/sparse_pack.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_perm.mlir (+7-7)
- (modified) mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir (+5-5)
- (modified) mlir/test/Dialect/SparseTensor/sparse_reinterpret_map.mlir (+11-11)
- (modified) mlir/test/Dialect/SparseTensor/sparse_reshape.mlir (+16-16)
- (modified) mlir/test/Dialect/SparseTensor/sparse_scalars.mlir (+6-6)
- (modified) mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir (+20-20)
- (modified) mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir (+14-14)
- (modified) mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_transpose.mlir (+16-16)
- (modified) mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir (+8-8)
- (modified) mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir (+8-8)
- (modified) mlir/test/Dialect/SparseTensor/spy_sddmm.mlir (+6-6)
- (modified) mlir/test/Dialect/SparseTensor/unused-tensor.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir (+42-42)
``````````diff
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 53a0c2428842f59..28445ce2a8bc243 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -1797,7 +1797,22 @@ Operation *SparseTensorDialect::materializeConstant(OpBuilder &builder,
return nullptr;
}
+namespace {
+struct SparseTensorAsmDialectInterface : public OpAsmDialectInterface {
+ using OpAsmDialectInterface::OpAsmDialectInterface;
+
+ AliasResult getAlias(Attribute attr, raw_ostream &os) const override {
+ if (attr.isa<SparseTensorEncodingAttr>()) {
+ os << "sparse";
+ return AliasResult::OverridableAlias;
+ }
+ return AliasResult::NoAlias;
+ }
+};
+} // namespace
+
void SparseTensorDialect::initialize() {
+ addInterface<SparseTensorAsmDialectInterface>();
addAttributes<
#define GET_ATTRDEF_LIST
#include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.cpp.inc"
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir
index 34189d329cc41ee..699508acf13cf75 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir
@@ -6,18 +6,18 @@
// Compute matrix matrix C = AB
//
// CHECK-LABEL: func.func @matmul(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xf64>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<?x?xf64>) -> tensor<?x?xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK-DAG: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<?x?xf64, #sparse{{[0-9]*}}>
+// CHECK-DAG: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?xf64, #sparse{{[0-9]*}}>
+// CHECK-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?xf64>
-// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
+// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<?x?xf64, #sparse{{[0-9]*}}> to memref<?xindex>
+// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<?x?xf64, #sparse{{[0-9]*}}> to memref<?xindex>
+// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK: %[[VAL_12:.*]] = gpu.wait async
// CHECK: %[[VAL_13:.*]] = memref.dim %[[VAL_9]], %[[VAL_3]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]], %[[VAL_15:.*]] = gpu.alloc async {{\[}}%[[VAL_12]]] (%[[VAL_13]]) : memref<?xindex>
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
index bd0bf6927b0da4c..14a0e9c30b20167 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
@@ -7,17 +7,17 @@
module {
// CHECK-LABEL: func.func @matvec(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<?xf64>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<?xf64>) -> tensor<?xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK-DAG: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
+// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<?x?xf64, #sparse{{[0-9]*}}>
+// CHECK-DAG: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?xf64, #sparse{{[0-9]*}}>
+// CHECK-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64, #sparse{{[0-9]*}}>
+// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<?x?xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK: %[[VAL_11:.*]] = gpu.wait async
// CHECK: %[[VAL_12:.*]] = memref.dim %[[VAL_8]], %[[VAL_3]] : memref<?xindex, strided<[?], offset: ?>>
// CHECK: %[[VAL_13:.*]], %[[VAL_14:.*]] = gpu.alloc async {{\[}}%[[VAL_11]]] (%[[VAL_12]]) : memref<?xindex>
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir
index ce7af53bb346278..97f36d49927bf9b 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir
@@ -22,12 +22,12 @@
#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
// CHECK-LABEL: func.func @sparse_sampled_dd(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<8x8xf64>,
-// CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse{{[0-9]*}}> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}>
// CHECK: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK: %[[VAL_7:.*]] = gpu.wait async
// CHECK: %[[VAL_8:.*]], %[[VAL_9:.*]] = gpu.alloc async {{\[}}%[[VAL_7]]] () : memref<8x8xf64>
@@ -36,9 +36,9 @@
// CHECK: %[[VAL_12:.*]] = gpu.wait async
// CHECK: %[[VAL_13:.*]], %[[VAL_14:.*]] = gpu.alloc async {{\[}}%[[VAL_12]]] () : memref<8x8xf64>
// CHECK: %[[VAL_15:.*]] = gpu.memcpy async {{\[}}%[[VAL_14]]] %[[VAL_13]], %[[VAL_11]] : memref<8x8xf64>, memref<8x8xf64>
-// CHECK: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK: %[[VAL_19:.*]] = gpu.wait async
// CHECK: %[[VAL_20:.*]] = memref.dim %[[VAL_16]], %[[VAL_4]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]], %[[VAL_22:.*]] = gpu.alloc async {{\[}}%[[VAL_19]]] (%[[VAL_20]]) : memref<?xindex>
@@ -70,8 +70,8 @@
// CHECK: %[[VAL_57:.*]] = gpu.memcpy async {{\[}}%[[VAL_56]]] %[[VAL_18]], %[[VAL_31]] : memref<?xf64>, memref<?xf64>
// CHECK: %[[VAL_58:.*]] = gpu.dealloc async {{\[}}%[[VAL_57]]] %[[VAL_31]] : memref<?xf64>
// CHECK: gpu.wait {{\[}}%[[VAL_58]]]
-// CHECK: %[[VAL_59:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[VAL_59]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: %[[VAL_59:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}>
+// CHECK: return %[[VAL_59]] : tensor<8x8xf64, #sparse{{[0-9]*}}>
// CHECK: }
//
// A kernel that computes a direct sampled matrix matrix multiplication
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir
index dd79a9017f7f4cb..93f49002a47d2db 100644
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir
@@ -19,14 +19,14 @@
}
// CHECK-LABEL: func.func @SDDMM_block(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xf32>,
-// CHECK-SAME: %[[VAL_2:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32, #sparse{{[0-9]*}}> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 4 : index
-// CHECK: %[[VAL_7:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<?x?xf32, #sparse{{[0-9]*}}>
// CHECK: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor<?x?xf32>
// CHECK: %[[VAL_9:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?xf32>
// CHECK: %[[VAL_10:.*]] = tensor.dim %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>
@@ -79,8 +79,8 @@
// CHECK: %[[VAL_66:.*]] = gpu.memcpy async {{\[}}%[[VAL_65]]] %[[VAL_27]], %[[VAL_40]] : memref<?xf32>, memref<?xf32>
// CHECK: %[[VAL_67:.*]] = gpu.dealloc async {{\[}}%[[VAL_66]]] %[[VAL_40]] : memref<?xf32>
// CHECK: gpu.wait {{\[}}%[[VAL_67]]]
-// CHECK: %[[VAL_68:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[VAL_68]] : tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: %[[VAL_68:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<?x?xf32, #sparse{{[0-9]*}}>
+// CHECK: return %[[VAL_68]] : tensor<?x?xf32, #sparse{{[0-9]*}}>
// CHECK: }
func.func @SDDMM_block(%args: tensor<?x?xf32, #BSR>,
%arga: tensor<?x?xf32>,
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index 5876eef6f193272..12d77ecfecc6ac9 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -664,7 +664,7 @@ func.func @sparse_convert_element_type(%arg0: tensor<32xf32, #SparseVector>) ->
}
// CHECK-LABEL: func.func @sparse_new_coo(
-// CHECK-SAME: %[[A0:.*]]: !llvm.ptr) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{{{.*}}}>>) {
+// CHECK-SAME: %[[A0:.*]]: !llvm.ptr) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse{{[0-9]*}}>) {
// CHECK-DAG: %[[VAL_1:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i32
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
@@ -712,7 +712,7 @@ func.func @sparse_new_coo(%arg0: !llvm.ptr) -> tensor<?x?xf32, #Coo> {
}
// CHECK-LABEL: func.func @sparse_new_coo_permute_no(
-// CHECK-SAME: %[[A0:.*]]: !llvm.ptr) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{{{.*}}}>>) {
+// CHECK-SAME: %[[A0:.*]]: !llvm.ptr) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse{{[0-9]*}}>) {
// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 2 : i32
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
diff --git a/mlir/test/Dialect/SparseTensor/dense.mlir b/mlir/test/Dialect/SparseTensor/dense.mlir
index 52db814572a15fb..2d8dcfea9adc194 100644
--- a/mlir/test/Dialect/SparseTensor/dense.mlir
+++ b/mlir/test/Dialect/SparseTensor/dense.mlir
@@ -32,14 +32,14 @@
// a non-annotated dense matrix as output.
//
// CHECK-LABEL: func @dense1(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1.000000e+00 : f32
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
@@ -73,14 +73,14 @@ func.func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
//
// CHECK-LABEL: func @dense2(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>) -> tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> {
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32, #sparse{{[0-9]*}}>) -> tensor<32x16xf32, #sparse{{[0-9]*}}> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1.000000e+00 : f32
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32x16xf32>
-// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
// CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index
@@ -90,8 +90,8 @@ func.func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
// CHECK: memref.store %[[VAL_14]], %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_15:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
-// CHECK: return %[[VAL_15]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK: %[[VAL_15:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}>
+// CHECK: return %[[VAL_15]] : tensor<32x16xf32, #sparse{{[0-9]*}}>
// CHECK: }
func.func @dense2(%arga: tensor<32x16xf32>,
%argx: tensor<32x16xf32, #DenseMatrix>)
@@ -116,14 +116,14 @@ func.func @dense2(%arga: tensor<32x16xf32>,
//
// CHECK-LABEL: func @dense3(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>) -> tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> {
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32, #sparse{{[0-9]*}}>) -> tensor<32x16xf32, #sparse{{[0-9]*}}> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 8 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}}>> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
// CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index
@@ -137,8 +137,8 @@ func.func @dense2(%arga: tensor<32x16xf32>,
// CHECK: memref.store %[[VAL_19:.*]], %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
-// CHECK: return %[[VAL_20]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK: %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}>
+// CHECK: return %[[VAL_20]] : tensor<32x16xf32, #sparse{{[0-9]*}}>
// CHECK: }
func.func @dense3(%arga: tensor<32x16x8xf32>,
%argx: tensor<32x16xf32, #DenseMatrix>)
diff --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir
index 73851c086aa2425..0153d444215e3e9 100644
--- a/mlir/test/Dialect/SparseTensor/fold.mlir
+++ b/mlir/test/Dialect/SparseTensor/fold.mlir
@@ -21,7 +21,7 @@ func.func @sparse_dce_convert(%arg0: tensor<64xf32>) {
}
// CHECK-LABEL: func @sparse_dce_getters(
-// CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>)
+// CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse{{[0-9]*}}>)
// CHECK-NOT: sparse_tensor.positions
// CHECK-NOT: sparse_tensor.coordinates
// ...
[truncated]
``````````
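A note on the test updates in the patch above: the FileCheck pattern `#sparse{{[0-9]*}}` is used rather than a literal `#sparse` because the AsmPrinter disambiguates repeated alias base names with numeric suffixes. A sketch of what a module with two distinct encodings would print as (the encodings and function names are chosen for illustration):

```mlir
#sparse = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
#sparse1 = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>

func.func private @vec(tensor<?xf64, #sparse>)
func.func private @mat(tensor<?x?xf64, #sparse1>)
```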
https://github.com/llvm/llvm-project/pull/72561