[Mlir-commits] [mlir] 573c4db - [mlir][sparse] refine reinterpret_map test cases (#72684)
llvmlistbot at llvm.org
Fri Nov 17 10:05:00 PST 2023
Author: Peiming Liu
Date: 2023-11-17T10:04:56-08:00
New Revision: 573c4db9478fb8374292f39ae168dbd6a20d9ff5
URL: https://github.com/llvm/llvm-project/commit/573c4db9478fb8374292f39ae168dbd6a20d9ff5
DIFF: https://github.com/llvm/llvm-project/commit/573c4db9478fb8374292f39ae168dbd6a20d9ff5.diff
LOG: [mlir][sparse] refine reinterpret_map test cases (#72684)
Added:
Modified:
mlir/test/Dialect/SparseTensor/sparse_reinterpret_map.mlir
Removed:
################################################################################
diff --git a/mlir/test/Dialect/SparseTensor/sparse_reinterpret_map.mlir b/mlir/test/Dialect/SparseTensor/sparse_reinterpret_map.mlir
index c6a3a366af9f379..46f04cca03ed7df 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_reinterpret_map.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_reinterpret_map.mlir
@@ -55,17 +55,19 @@ func.func @mul(%arg0: tensor<32x32xf32>,
)
}>
+// CHECK-DAG: #[[$remap:.*]] = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : dense, d1 floordiv 2 : compressed, d0 mod 2 : dense, d1 mod 2 : dense) }>
+// CHECK-DAG: #[[$demap:.*]] = #sparse_tensor.encoding<{ map = (d0, d1, d2, d3) -> (d0 : dense, d1 : compressed, d2 : dense, d3 : dense) }>
// CHECK-LABEL: func.func @sparse_foreach_reinterpret_map(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<2x4xf64, #sparse{{[0-9]*}}>
-// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() : tensor<1x2x2x2xf64, #sparse{{[0-9]*}}>
-// CHECK: %[[VAL_2:.*]] = sparse_tensor.reinterpret_map %[[VAL_0]] : tensor<2x4xf64, #sparse{{[0-9]*}}>
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<2x4xf64, #[[$remap]]>
+// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() : tensor<1x2x2x2xf64, #[[$demap]]>
+// CHECK: %[[VAL_2:.*]] = sparse_tensor.reinterpret_map %[[VAL_0]] : tensor<2x4xf64, #[[$remap]]> to tensor<1x2x2x2xf64, #[[$demap]]>
// CHECK: %[[VAL_4:.*]] = sparse_tensor.foreach in %[[VAL_2]] init(%[[VAL_1]])
-// CHECK: ^bb0(%[[VAL_5:.*]]: index, %[[VAL_6:.*]]: index, %[[VAL_7:.*]]: index, %[[VAL_8:.*]]: index, %[[VAL_9:.*]]: f64, %[[VAL_10:.*]]: tensor<1x2x2x2xf64, #sparse{{[0-9]*}}>
-// CHECK: %[[VAL_11:.*]] = sparse_tensor.insert %[[VAL_9]] into %[[VAL_10]]{{\[}}%[[VAL_5]], %[[VAL_6]], %[[VAL_7]], %[[VAL_8]]] : tensor<1x2x2x2xf64, #sparse{{[0-9]*}}>
+// CHECK: ^bb0(%[[VAL_5:.*]]: index, %[[VAL_6:.*]]: index, %[[VAL_7:.*]]: index, %[[VAL_8:.*]]: index, %[[VAL_9:.*]]: f64, %[[VAL_10:.*]]: tensor<1x2x2x2xf64, #[[$demap]]>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.insert %[[VAL_9]] into %[[VAL_10]]{{\[}}%[[VAL_5]], %[[VAL_6]], %[[VAL_7]], %[[VAL_8]]] : tensor<1x2x2x2xf64, #[[$demap]]>
// CHECK: sparse_tensor.yield %[[VAL_11]] : tensor<1x2x2x2xf64, #sparse{{[0-9]*}}>
// CHECK: }
-// CHECK: %[[VAL_12:.*]] = sparse_tensor.reinterpret_map %[[VAL_4]] : tensor<1x2x2x2xf64, #sparse{{[0-9]*}}>
-// CHECK: %[[VAL_13:.*]] = sparse_tensor.load %[[VAL_12]] hasInserts : tensor<2x4xf64, #sparse{{[0-9]*}}>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.reinterpret_map %[[VAL_4]] : tensor<1x2x2x2xf64, #[[$demap]]> to tensor<2x4xf64, #[[$remap]]>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.load %[[VAL_12]] hasInserts : tensor<2x4xf64, #[[$remap]]>
// CHECK: return %[[VAL_13]] : tensor<2x4xf64, #sparse{{[0-9]*}}>
// CHECK: }
func.func @sparse_foreach_reinterpret_map(%6 : tensor<2x4xf64, #BSR>) -> tensor<2x4xf64, #BSR> {
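Note on the pattern used above (not part of the commit): the diff replaces the generic `#sparse{{[0-9]*}}` placeholders with named FileCheck captures, so the test pins the exact source (`#[[$remap]]`) and target (`#[[$demap]]`) encodings of the `sparse_tensor.reinterpret_map` op instead of accepting any printed encoding. A minimal sketch of the technique, with a hypothetical function and encoding, looks like this: a `CHECK-DAG` line binds a `$`-prefixed pattern variable (which survives `CHECK-LABEL` boundaries) to whatever encoding alias the printer emits, and later lines reuse it.

// CHECK-DAG: #[[$MAP:.*]] = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
// CHECK-LABEL: func.func @example(
// CHECK-SAME: %{{.*}}: tensor<8x8xf64, #[[$MAP]]>)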