[Mlir-commits] [mlir] e3bc4db - [mlir][Linalg] Make printer/parser have the same behavior.

Hanhan Wang llvmlistbot at llvm.org
Mon Jun 14 13:38:48 PDT 2021


Author: Hanhan Wang
Date: 2021-06-14T13:38:30-07:00
New Revision: e3bc4dbe8e75faf13798028fcb7710675d8c05ed

URL: https://github.com/llvm/llvm-project/commit/e3bc4dbe8e75faf13798028fcb7710675d8c05ed
DIFF: https://github.com/llvm/llvm-project/commit/e3bc4dbe8e75faf13798028fcb7710675d8c05ed.diff

LOG: [mlir][Linalg] Make printer/parser have the same behavior.

The parser of the generic op did not recognize the output printed by mlir-opt
when there are multiple results: the printer wraps the result types in
parentheses, while the parser accepted only the unwrapped list. This patch
makes the two behaviors consistent.
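For illustration, a sketch of the now-accepted multi-result spelling (op,
values, and shapes are made up for this example; %arg0, %init0 and %init1
are assumed to be defined elsewhere):

  %0:2 = linalg.generic {
      indexing_maps = [affine_map<(d0) -> (d0)>,
                       affine_map<(d0) -> (d0)>,
                       affine_map<(d0) -> (d0)>],
      iterator_types = ["parallel"]}
      ins(%arg0 : tensor<4xf32>)
      outs(%init0, %init1 : tensor<4xf32>, tensor<4xf32>) {
    ^bb0(%in: f32, %out0: f32, %out1: f32):
      linalg.yield %in, %in : f32, f32
  } -> (tensor<4xf32>, tensor<4xf32>)

The printer already emitted the parenthesized result list shown above;
before this patch the parser accepted only the unparenthesized form
-> tensor<4xf32>, tensor<4xf32>, so printed IR failed to re-parse.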

Reviewed By: mravishankar

Differential Revision: https://reviews.llvm.org/D104256

Added: 
    

Modified: 
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/test/Dialect/Linalg/bufferize.mlir
    mlir/test/Dialect/Linalg/canonicalize.mlir
    mlir/test/Dialect/Linalg/invalid.mlir
    mlir/test/Dialect/Linalg/roundtrip.mlir
    mlir/test/Dialect/Linalg/vectorization.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 8114a22ec50e6..2b3ae8909541a 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -3067,9 +3067,8 @@ parseNamedStructuredOpRegion(OpAsmParser &parser, Region &region,
 static ParseResult
 parseNamedStructuredOpResults(OpAsmParser &parser,
                               SmallVectorImpl<Type> &resultTypes) {
-  if (succeeded(parser.parseOptionalArrow()))
-    if (parser.parseTypeList(resultTypes))
-      return failure();
+  if (parser.parseOptionalArrowTypeList(resultTypes))
+    return failure();
   return success();
 }
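A side note on the new helper (my reading of the OpAsmParser API, not part
of the patch): parseOptionalArrowTypeList still accepts a single result
type without parentheses, so single-result ops round-trip unchanged and
only multi-result type lists gain the mandatory parentheses. For example
(a sketch; %a, %b and %c assumed defined):

  // Single result: no parentheses, unaffected by this patch.
  %0 = linalg.matmul ins(%a, %b : tensor<4x8xf32>, tensor<8x4xf32>)
                     outs(%c : tensor<4x4xf32>) -> tensor<4x4xf32>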
 

diff --git a/mlir/test/Dialect/Linalg/bufferize.mlir b/mlir/test/Dialect/Linalg/bufferize.mlir
index 9f3406e27d084..7410adf9f5f35 100644
--- a/mlir/test/Dialect/Linalg/bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/bufferize.mlir
@@ -85,7 +85,7 @@ func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
       ^bb0(%gen_arg1: f32, %out1: f32, %out2: f32):
         %tmp1 = math.exp %gen_arg1 : f32
         linalg.yield %tmp1, %tmp1 : f32, f32
-    } -> tensor<4xf32>, tensor<4xf32>
+    } -> (tensor<4xf32>, tensor<4xf32>)
     return %0, %1 : tensor<4xf32>, tensor<4xf32>
 }
 
@@ -118,7 +118,7 @@ func @dynamic_results(%arg0: tensor<?x?xf32>)
       ^bb0(%gen_arg1: f32, %out1: f32, %out2: f32):
         %tmp1 = math.exp %gen_arg1 : f32
         linalg.yield %tmp1, %tmp1 : f32, f32
-    } -> tensor<?x?xf32>, tensor<?x?xf32>
+    } -> (tensor<?x?xf32>, tensor<?x?xf32>)
     return %0, %1 : tensor<?x?xf32>, tensor<?x?xf32>
 }
 

diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
index 6fa9fc4900f6e..6bd2895fcdc94 100644
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -714,7 +714,7 @@ func @init_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
     outs(%arg_0, %arg_1 : tensor<?xf32>, tensor<?xf32>) {
   ^bb0(%in: f32, %out_0: f32, %out_1: f32):
     linalg.yield %in, %in : f32, f32
-  } -> tensor<?xf32>, tensor<?xf32>
+  } -> (tensor<?xf32>, tensor<?xf32>)
 
   %c0 = constant 0 : index
   %num_elem_0 = memref.dim %0, %c0 : tensor<?xf32>
@@ -778,7 +778,7 @@ func @remove_no_op(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>)
     outs(%3, %3 : tensor<?x?x?xf32>, tensor<?x?x?xf32>) {
   ^bb0(%arg2 : f32, %arg3 : f32, %arg4 : f32, %arg5 : f32):
     linalg.yield %arg3, %arg2 : f32, f32
-  } -> tensor<?x?x?xf32>, tensor<?x?x?xf32>
+  } -> (tensor<?x?x?xf32>, tensor<?x?x?xf32>)
   return %4, %5 : tensor<?x?x?xf32>, tensor<?x?x?xf32>
 }
 // CHECK-LABEL: func @remove_no_op
@@ -832,7 +832,7 @@ func @keep_not_noop(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>)
     outs(%2, %2 : tensor<?x?xf32>, tensor<?x?xf32>) {
     ^bb0(%arg3: f32, %arg4 : f32, %arg5 : f32, %arg6 : f32):
       linalg.yield %arg2, %arg4 : f32, f32
-    } -> tensor<?x?xf32>, tensor<?x?xf32>
+    } -> (tensor<?x?xf32>, tensor<?x?xf32>)
   return %3#0, %3#1 : tensor<?x?xf32>, tensor<?x?xf32>
 }
 // CHECK-LABEL: func @keep_not_noop

diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index ac56add661bcc..a9041e2203c87 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -449,7 +449,7 @@ func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?xf32>, %c3: memref<?x?x?x
 func @incorrect_region_arg_count(%m: memref<?x?xf32>) {
   // expected-error @+3 {{region expects 3 args, got 2}}
   %res = linalg.matmul ins(%m, %m : memref<?x?xf32>, memref<?x?xf32>)
-                       -> tensor<?x?xf32>, tensor<?x?xf32>
+                       -> (tensor<?x?xf32>, tensor<?x?xf32>)
   return
 }
 

diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index 4675f82852b7d..b0954016cb75f 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -424,6 +424,39 @@ func @generic_with_tensor_input_and_output(
 
 // -----
 
+func @generic_with_multiple_tensor_outputs(
+    %arg0: tensor<?xi32>, %arg1: tensor<?xi32>, %arg2: i32)
+    -> (tensor<i32>, tensor<i32>) {
+  %c0 = constant 0 : index
+  %0 = linalg.init_tensor [] : tensor<i32>
+  %1 = linalg.fill(%0, %arg2) : tensor<i32>, i32 -> tensor<i32>
+  %2 = linalg.init_tensor [] : tensor<i32>
+  %3 = linalg.fill(%2, %arg2) : tensor<i32>, i32 -> tensor<i32>
+  %4:2 = linalg.generic {
+    indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> ()>, affine_map<(d0) -> ()>],
+    iterator_types = ["reduction"]}
+    ins(%arg0, %arg1 : tensor<?xi32>, tensor<?xi32>)
+    outs(%1, %3 : tensor<i32>, tensor<i32>) {
+  ^bb0(%arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32):  // no predecessors
+    %5 = cmpi sge, %arg3, %arg5 : i32
+    %6 = select %5, %arg3, %arg5 : i32
+    %7 = cmpi eq, %arg3, %arg5 : i32
+    %8 = cmpi slt, %arg4, %arg6 : i32
+    %9 = select %8, %arg4, %arg6 : i32
+    %10 = select %5, %arg4, %arg6 : i32
+    %11 = select %7, %9, %10 : i32
+    linalg.yield %6, %11 : i32, i32
+  } -> (tensor<i32>, tensor<i32>)
+  return %4#0, %4#1 : tensor<i32>, tensor<i32>
+}
+// CHECK-LABEL: func @generic_with_multiple_tensor_outputs
+//       CHECK:   %{{.*}} = linalg.generic {
+//  CHECK-SAME:      ins({{.*}} : tensor<?xi32>, tensor<?xi32>)
+//  CHECK-SAME:     outs({{.*}} : tensor<i32>, tensor<i32>)
+//       CHECK:   } -> (tensor<i32>, tensor<i32>)
+
+// -----
+
 #accesses_2 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>,

diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index ff594a0f32e79..0705c49a5042f 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -386,9 +386,9 @@ func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
   //       CHECK:   %[[R9:.*]] = vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
     linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32,
       f32, f32, f32, f32, f32, f32, f32, f32
-  } -> tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
+  } -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
     tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
-    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>
+    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>)
   //       CHECK:   return %[[R0]], %[[R1]], %[[R2]], %[[R3]], %[[R4]], %[[R5]], %[[R6]], %[[R7]], %[[R8]], %[[R9]] : tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>
   return %r#0, %r#1, %r#2, %r#3, %r#4, %r#5, %r#6, %r#7, %r#8, %r#9:
     tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,


        

