[Mlir-commits] [mlir] 0b92f8a - [mlir][NFC] Address filecheck_lint findings in tosa-to-linalg-named.mlir.

Robert Suderman llvmlistbot at llvm.org
Tue Feb 28 17:10:37 PST 2023


Author: Benjamin Chetioui
Date: 2023-03-01T01:03:01Z
New Revision: 0b92f8afdc8c4680f283a5ec32406d3e47193841

URL: https://github.com/llvm/llvm-project/commit/0b92f8afdc8c4680f283a5ec32406d3e47193841
DIFF: https://github.com/llvm/llvm-project/commit/0b92f8afdc8c4680f283a5ec32406d3e47193841.diff

LOG: [mlir][NFC] Address filecheck_lint findings in tosa-to-linalg-named.mlir.

Reviewed By: rsuderman

Differential Revision: https://reviews.llvm.org/D144971

Added: 
    

Modified: 
    mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
index 06d548f347da8..a9a453f534f05 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
@@ -217,7 +217,7 @@ func.func @avg_pool_f32(%arg0: tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>)
 
   // Compute the sum padding:
   // CHECK: %[[KERNEL:.+]] = tensor.empty() : tensor<4x4xf32>
-  // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum 
+  // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum
   // CHECK-SAME: dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>}
   // CHECK-SAME: ins(%[[PAD]], %[[KERNEL]] : tensor<1x8x36x62xf32>, tensor<4x4xf32>)
   // CHECK-SAME: outs(%[[FILL]] : tensor<1x5x33x62xf32>)
@@ -233,7 +233,7 @@ func.func @avg_pool_f32(%arg0: tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>)
 
   // Divide the sum pooling by the number of summed values.
   // CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<1x5x33x62xf32>
-  // CHECK: %[[GENERIC:.+]] = linalg.generic 
+  // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK-SAME: indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
   // CHECK-SAME: ins(%[[POOL]] : tensor<1x5x33x62xf32>)
   // CHECK-SAME: outs(%[[EMPTY]] : tensor<1x5x33x62xf32>)
@@ -292,12 +292,12 @@ func.func @avg_pool_f32(%arg0: tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>)
 
 // -----
 
-// CHECK-LABLE: @avg_pool_i8
+// CHECK-LABEL: @avg_pool_i8
 func.func @avg_pool_i8(%arg0: tensor<1x6x34x62xi8>) -> (tensor<1x5x33x62xi8>) {
-  // CHECK: %[[GENERIC:.+]] = linalg.generic 
+  // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK-SAME: indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
-  // CHECK-SAME: ins(%[[POOL]] : tensor<1x5x33x62xi32>)
-  // CHECK-SAME: outs(%[[EMPTY]] : tensor<1x5x33x62xi8>)
+  // CHECK-SAME: ins(%[[POOL:.+]] : tensor<1x5x33x62xi32>)
+  // CHECK-SAME: outs(%[[EMPTY:.+]] : tensor<1x5x33x62xi8>)
   // CHECK: ^bb0(%[[IN:.+]]: i32, %{{.+}}: i8)
 
   // Only different behavior is how the division is performed.
@@ -346,7 +346,7 @@ func.func @avg_pool_dyn(%arg0: tensor<?x6x34x62xf32>) -> (tensor<?x5x33x62xf32>)
   // CHECK: %[[EMPTY:.+]] = tensor.empty(%[[BATCH]]) : tensor<?x5x33x62xf32>
   // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[F0]] : f32) outs(%[[EMPTY]] : tensor<?x5x33x62xf32>)
   // CHECK: %[[KERNEL:.+]] = tensor.empty() : tensor<4x4xf32>
-  // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum 
+  // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum
   // CHECK-SAME: dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>
   // CHECK-SAME: ins(%[[PADDED]], %[[KERNEL]] : tensor<?x8x36x62xf32>, tensor<4x4xf32>)
   // CHECK-SAME: outs(%[[FILL]] : tensor<?x5x33x62xf32>) -> tensor<?x5x33x62xf32>
@@ -637,7 +637,7 @@ func.func @conv3d_f32(%input: tensor<1x49x48x47x27xf32>, %weights: tensor<28x3x4
   // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK-SAME: {indexing_maps = [#map, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]}
   // CHECK-SAME: ins(%arg2, %[[CONV3D]] : tensor<28xf32>, tensor<1x47x45x43x28xf32>)
-  // CHECK--SAME: outs(%[[EMPTY]] : tensor<1x47x45x43x28xf32>) {
+  // CHECK-SAME: outs(%[[EMPTY]] : tensor<1x47x45x43x28xf32>) {
   // CHECK: ^bb0(%[[A1:.+]]: f32, %[[A2:.+]]: f32, %{{.+}}: f32):
   // CHECK: %[[ADD:.+]] = arith.addf %[[A1]], %[[A2]] : f32
   // CHECK: linalg.yield %[[ADD]]
@@ -664,7 +664,7 @@ func.func @conv3d_i8(%input: tensor<1x49x48x47x27xi8>, %weights: tensor<28x3x4x5
   // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK-SAME: {indexing_maps = [#map, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]}
   // CHECK-SAME: ins(%arg2, %[[CONV3D]] : tensor<28xi32>, tensor<1x47x45x43x28xi32>)
-  // CHECK--SAME: outs(%[[EMPTY]] : tensor<1x47x45x43x28xi32>) {
+  // CHECK-SAME: outs(%[[EMPTY]] : tensor<1x47x45x43x28xi32>) {
   // CHECK: ^bb0(%[[A1:.+]]: i32, %[[A2:.+]]: i32, %{{.+}}: i32):
   // CHECK: %[[ADD:.+]] = arith.addi %[[A1]], %[[A2]] : i32
   // CHECK: linalg.yield %[[ADD]]


        


More information about the Mlir-commits mailing list