[Mlir-commits] [mlir] 7003e25 - [mlir][sparse] code formatting (NFC) (#74779)

llvmlistbot at llvm.org
Thu Dec 7 15:46:28 PST 2023


Author: Aart Bik
Date: 2023-12-07T15:46:24-08:00
New Revision: 7003e255d3f1fbff3b2ef3052d478b65ec555963

URL: https://github.com/llvm/llvm-project/commit/7003e255d3f1fbff3b2ef3052d478b65ec555963
DIFF: https://github.com/llvm/llvm-project/commit/7003e255d3f1fbff3b2ef3052d478b65ec555963.diff

LOG: [mlir][sparse] code formatting (NFC) (#74779)

Added: 
    

Modified: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
index 7825e8fe9bafa4..6c35e2b51ed8f4 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
@@ -41,8 +41,8 @@
 module {
 
   func.func @conv2d(%input:  tensor<8x8xi32>,
-               %filter: tensor<3x3xi32>,
-               %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
+                    %filter: tensor<3x3xi32>,
+                    %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
       outs (%output: tensor<6x6xi32>) -> tensor<6x6xi32>
@@ -50,7 +50,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_DCSR(%input:  tensor<8x8xi32, #DCSR>,
-               %filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
+                                    %filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
     %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #DCSR>, tensor<3x3xi32, #DCSR>)
@@ -59,7 +59,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CSR(%input:  tensor<8x8xi32, #CSR>,
-               %filter: tensor<3x3xi32, #CSR>) -> tensor<6x6xi32, #CSR> {
+                                   %filter: tensor<3x3xi32, #CSR>) -> tensor<6x6xi32, #CSR> {
     %s = tensor.empty() : tensor<6x6xi32, #CSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSR>, tensor<3x3xi32, #CSR>)
@@ -68,7 +68,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CD(%input:  tensor<8x8xi32, #CDR>,
-               %filter: tensor<3x3xi32, #CDR>) -> tensor<6x6xi32, #CDR> {
+                                  %filter: tensor<3x3xi32, #CDR>) -> tensor<6x6xi32, #CDR> {
     %s = tensor.empty() : tensor<6x6xi32, #CDR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CDR>, tensor<3x3xi32, #CDR>)
@@ -77,7 +77,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CSC(%input:  tensor<8x8xi32, #CSC>,
-               %filter: tensor<3x3xi32, #CSC>) -> tensor<6x6xi32, #CSC> {
+                                   %filter: tensor<3x3xi32, #CSC>) -> tensor<6x6xi32, #CSC> {
     %s = tensor.empty() : tensor<6x6xi32, #CSC>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSC>, tensor<3x3xi32, #CSC>)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
index 80946f5388520a..f2907db7d825b6 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
@@ -46,8 +46,8 @@
 module {
 
   func.func @conv2d(%input:  tensor<8x8xi32>,
-               %filter: tensor<3x3xi32>,
-               %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
+                    %filter: tensor<3x3xi32>,
+                    %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
       outs (%output: tensor<6x6xi32>) -> tensor<6x6xi32>
@@ -70,7 +70,7 @@ module {
   }
 
   func.func @conv2d_sparse_out(%input:  tensor<8x8xi32>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
+                               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
     %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
@@ -79,7 +79,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_DCSR(%input:  tensor<8x8xi32, #DCSR>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
+                                    %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
     %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #DCSR>, tensor<3x3xi32>)
@@ -88,7 +88,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CSR(%input:  tensor<8x8xi32, #CSR>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSR> {
+                                   %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSR> {
     %s = tensor.empty() : tensor<6x6xi32, #CSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSR>, tensor<3x3xi32>)
@@ -97,7 +97,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CD(%input:  tensor<8x8xi32, #CDR>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CDR> {
+                                  %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CDR> {
     %s = tensor.empty() : tensor<6x6xi32, #CDR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CDR>, tensor<3x3xi32>)
@@ -106,7 +106,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CSC(%input:  tensor<8x8xi32, #CSC>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSC> {
+                                   %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSC> {
     %s = tensor.empty() : tensor<6x6xi32, #CSC>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSC>, tensor<3x3xi32>)
@@ -125,7 +125,6 @@ module {
       [ -1,  0,  1 ]
     ]> : tensor<3x3xi32>
 
-
     %input = arith.constant dense<[
       [  1,  2,  3,  4,  0,  6,  7,  8 ],
       [  2,  2,  4,  4,  0,  0,  6,  8 ],
@@ -270,7 +269,6 @@ module {
       : tensor<6x6xi32>, vector<6x6xi32>
     vector.print %v : vector<6x6xi32>
 
-
     // Release the resources.
     bufferization.dealloc_tensor %sparse_input_DCSR : tensor<8x8xi32, #DCSR>
     bufferization.dealloc_tensor %sparse_input_CSR : tensor<8x8xi32, #CSR>

