[Mlir-commits] [mlir] 78ba3aa - [mlir][sparse] performs a tab cleanup (NFC)
Aart Bik
llvmlistbot at llvm.org
Thu Dec 15 12:12:14 PST 2022
Author: Aart Bik
Date: 2022-12-15T12:12:06-08:00
New Revision: 78ba3aa765d079e1f83bfc4bf30f55ddaed518ce
URL: https://github.com/llvm/llvm-project/commit/78ba3aa765d079e1f83bfc4bf30f55ddaed518ce
DIFF: https://github.com/llvm/llvm-project/commit/78ba3aa765d079e1f83bfc4bf30f55ddaed518ce.diff
LOG: [mlir][sparse] performs a tab cleanup (NFC)
Reviewed By: Peiming
Differential Revision: https://reviews.llvm.org/D140142
Added:
Modified:
mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 31d5eb5e0e26..30f864fd20ff 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -451,7 +451,7 @@ def SparseTensor_CompressOp : SparseTensor_Op<"compress",
StridedMemRefRankOf<[Index],[1]>:$added,
Index:$count,
AnySparseTensor:$tensor,
- Variadic<Index>:$indices)>,
+ Variadic<Index>:$indices)>,
Results<(outs AnySparseTensor:$result)> {
string summary = "Compressed an access pattern for insertion";
string description = [{
@@ -477,8 +477,8 @@ def SparseTensor_CompressOp : SparseTensor_Op<"compress",
}];
let assemblyFormat = "$values `,` $filled `,` $added `,` $count"
" `into` $tensor `[` $indices `]` attr-dict"
- " `:` type($values) `,` type($filled) `,` type($added)"
- " `,` type($tensor)";
+ " `:` type($values) `,` type($filled) `,` type($added)"
+ " `,` type($tensor)";
let hasVerifier = 1;
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
index e12e9bfeece1..79d1e3553107 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
@@ -83,8 +83,8 @@ module {
%v2 = arith.constant sparse<
[ [0], [3], [5], [11], [13], [17], [18], [21], [31] ],
[ -2147483648, -2147483647, -1000, -1, 0,
- 1, 1000, 2147483646, 2147483647
- ]
+ 1, 1000, 2147483646, 2147483647
+ ]
> : tensor<32xi32>
%sv1 = sparse_tensor.convert %v1
: tensor<32xf64> to tensor<?xf64, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
index 41b541021c37..6917274e00e6 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -48,7 +48,7 @@ module {
//
func.func @kernel_flatten(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>,
%argx: tensor<7x3xf64>)
- -> tensor<7x3xf64> {
+ -> tensor<7x3xf64> {
%0 = linalg.generic #trait_flatten
ins(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>)
outs(%argx: tensor<7x3xf64>) {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index 943007034974..2c18c2bf305d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -55,7 +55,7 @@ module {
func.func @kernel_matvec(%arga: tensor<?x?xi32, #SparseMatrix>,
%argb: tensor<?xi32>,
%argx: tensor<?xi32>)
- -> tensor<?xi32> {
+ -> tensor<?xi32> {
%0 = linalg.generic #matvec
ins(%arga, %argb: tensor<?x?xi32, #SparseMatrix>, tensor<?xi32>)
outs(%argx: tensor<?xi32>) {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
index 0933e827db45..375db9afc26f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -49,7 +49,7 @@ module {
%argc: tensor<?x?xf64>,
%argd: tensor<?x?xf64>,
%arga: tensor<?x?xf64>)
- -> tensor<?x?xf64> {
+ -> tensor<?x?xf64> {
%0 = linalg.generic #mttkrp
ins(%argb, %argc, %argd:
tensor<?x?x?xf64, #SparseTensor>, tensor<?x?xf64>, tensor<?x?xf64>)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
index 36c09a0ea4a2..4978434845d2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
@@ -36,7 +36,7 @@
module {
func.func @redsum(%arga: tensor<?x?x?xi32, #SparseTensor>,
%argb: tensor<?x?x?xi32, #SparseTensor>)
- -> tensor<?x?xi32, #SparseMatrix> {
+ -> tensor<?x?xi32, #SparseMatrix> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arga, %c0 : tensor<?x?x?xi32, #SparseTensor>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
index 7b982f716992..2c1200a0b546 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -59,7 +59,7 @@ module {
%0 = call @quantized_matmul(%input1, %sparse_input2, %output)
: (tensor<5x3xi8>,
tensor<3x6xi8, #DCSR>,
- tensor<5x6xi32>) -> tensor<5x6xi32>
+ tensor<5x6xi32>) -> tensor<5x6xi32>
//
// Verify the output.
More information about the Mlir-commits mailing list