[Mlir-commits] [mlir] 945b76d - [mlir][linalg] Fix Linalg roundtrip test.

Alexander Belyaev llvmlistbot at llvm.org
Wed Feb 24 02:31:21 PST 2021


Author: Alexander Belyaev
Date: 2021-02-24T11:31:09+01:00
New Revision: 945b76d42838498740e2aed77a4a8f0168fbe00b

URL: https://github.com/llvm/llvm-project/commit/945b76d42838498740e2aed77a4a8f0168fbe00b
DIFF: https://github.com/llvm/llvm-project/commit/945b76d42838498740e2aed77a4a8f0168fbe00b.diff

LOG: [mlir][linalg] Fix Linalg roundtrip test.

The test did not check whether the operations could be parsed again after
being printed once; the RUN line only FileChecked the first print.
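
A roundtrip test pipes the printed IR back through the parser instead of
only FileChecking the first print. The updated RUN lines below use this
idiom, exercising both the custom and the generic form:

  // RUN: mlir-opt %s | mlir-opt | FileCheck %s
  // RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s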

Differential Revision: https://reviews.llvm.org/D97368

Added: 
    

Modified: 
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/test/Dialect/Linalg/roundtrip.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 36e73bbabc37..f6e2994b9718 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1707,15 +1707,15 @@ static void print(OpAsmPrinter &p, TiledLoopOp op) {
     << ")";
 
   if (!op.inputs().empty())
-    p << " ins (" << op.inputs() << ")";
+    p << " ins (" << op.inputs() << ": " << TypeRange(op.inputs()) << ")";
   if (!op.outputs().empty())
-    p << " outs (" << op.outputs() << ")";
+    p << " outs (" << op.outputs() << ":" << TypeRange(op.outputs()) << ")";
 
   if (llvm::any_of(op.iterator_types(), [](Attribute attr) {
         return attr.cast<StringAttr>().getValue() !=
                getParallelIteratorTypeName();
       })) {
-    p << " iterators(" << op.iterator_types() << ")";
+    p << " iterators" << op.iterator_types() << "";
   }
 
   p.printRegion(op.region(), /*printEntryBlockArgs=*/false);
@@ -1792,7 +1792,7 @@ static ParseResult parseTiledLoopOp(OpAsmParser &parser,
   if (succeeded(parser.parseOptionalKeyword("iterators"))) {
     StringAttr iterType;
 
-    if (parser.parseLParen() || parser.parseAttribute(iterType))
+    if (parser.parseLSquare() || parser.parseAttribute(iterType))
       return failure();
     iterTypes.push_back(iterType);
     for (int i = 1, e = ivs.size(); i < e; ++i) {
@@ -1800,7 +1800,7 @@ static ParseResult parseTiledLoopOp(OpAsmParser &parser,
         return failure();
       iterTypes.push_back(iterType);
     }
-    if (parser.parseRParen())
+    if (parser.parseRSquare())
       return failure();
   } else {
     auto parallelIter = builder.getStringAttr(getParallelIteratorTypeName());

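Before this change the printer omitted the operand types of
linalg.tiled_loop and wrapped the iterator array in parentheses that
parseTiledLoopOp did not accept; the printed form now matches the parser.
A sketch of the effect (operand names and types are made up for
illustration, not taken from the test):

  // Before (not reparsable: no operand types, mismatched iterator syntax):
  linalg.tiled_loop ... ins (%in) outs (%out) iterators(["reduction"])
  // After:
  linalg.tiled_loop ... ins (%in: tensor<8xf32>) outs (%out: tensor<8xf32>) iterators["reduction"]
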
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index cffafa551e50..d034ff48c09f 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -1,10 +1,32 @@
-// RUN: mlir-opt -split-input-file %s | FileCheck %s
+// RUN: mlir-opt %s | mlir-opt | FileCheck %s
+// RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s
 
 // TODO: Re-enable LLVM lowering test after IndexedGenericOp is lowered.
 //
 // Test that we can lower all the way to LLVM without crashing, don't check results here.
 // DISABLED: mlir-opt %s --convert-linalg-to-llvm -o=/dev/null 2>&1
 
+// CHECK-DAG: #[[$permute_0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
+// CHECK-DAG: #[[$permute_1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
+// CHECK-DAG: #[[$reshape5D01:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
+// CHECK-DAG: #[[$reshape5D0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0)>
+// CHECK-DAG: #[[$reshape5D1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d1)>
+// CHECK-DAG: #[[$reshape5D2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
+// CHECK-DAG: #[[$reshape5D345:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
+// CHECK-DAG: #[[$reshape5D34:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
+// CHECK-DAG: #[[$reshapeD012:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+// CHECK-DAG: #[[$reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+// CHECK-DAG: #[[$reshapeD0:.*]] = affine_map<(d0, d1, d2) -> (d0)>
+// CHECK-DAG: #[[$reshapeD12:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
+// CHECK-DAG: #[[$reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
+// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$strided3DOFF0:.*]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)>
+// CHECK-DAG: #[[$strided3DT:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>
+// CHECK-DAG: #[[$strided6D:.*]] = affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5)>
+
 func @pad_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
                   %pad_value: f32) -> tensor<6x?x?x?xf32> {
   %0 = linalg.pad_tensor %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] {
@@ -107,9 +129,6 @@ func @views(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index
 
 // -----
 
-// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-
 func @ops(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
           %arg1: memref<?xf32, offset: ?, strides: [1]>,
           %arg2: memref<?xf32, offset: ?, strides: [1]>,
@@ -141,7 +160,6 @@ func @ops(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 
 // -----
 
-// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 
 func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
   linalg.fill(%arg0, %arg1) : memref<?xf32, offset: ?, strides: [1]>, f32
@@ -153,9 +171,6 @@ func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
 
 // -----
 
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[$strided3DT:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>
-
 func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   %0 = transpose %arg0 (i, j, k) -> (k, j, i) : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]> to memref<?x?x?xf32, affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>>
   return
@@ -166,7 +181,6 @@ func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
 
 // -----
 
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
 func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: f32) {
   linalg.fill(%arg0, %arg1) : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, f32
@@ -178,7 +192,6 @@ func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1:
 
 // -----
 
-// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 
 func @copy_view(%arg0: memref<?xf32, offset: ?, strides: [1]>,
                 %arg1: memref<?xf32, offset: ?, strides: [1]>) {
@@ -192,9 +205,6 @@ func @copy_view(%arg0: memref<?xf32, offset: ?, strides: [1]>,
 
 // -----
 
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
-// CHECK-DAG: #[[$map1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
 
 func @copy_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
                  %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
@@ -206,14 +216,13 @@ func @copy_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
 // CHECK-LABEL: func @copy_view3(
 //       CHECK:  %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>) {
 //       CHECK:   linalg.copy(%{{.*}}, %{{.*}}) {
-//  CHECK-SAME:     inputPermutation = #[[$map0]],
-//  CHECK-SAME:     outputPermutation = #[[$map1]]} :
+//  CHECK-SAME:     inputPermutation = #[[$permute_0]],
+//  CHECK-SAME:     outputPermutation = #[[$permute_1]]} :
 //  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]>,
 //  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]>
 
 // -----
 
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
 func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
                  %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
@@ -231,7 +240,6 @@ func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
 
 // -----
 
-// CHECK-DAG: #[[$strided6D:.*]] = affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5)>
 
 func @conv_view6(%arg0: memref<?x?x?x?x?x?xf32, offset: ?, strides: [?, ?, ?, ?, ?, 1]>,
                  %arg1: memref<?x?x?x?x?x?xf32, offset: ?, strides: [?, ?, ?, ?, ?, 1]>,
@@ -315,23 +323,20 @@ func @pooling_sum(%arg0: memref<?x?x?xf32>,
 
 // -----
 
-// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-
-#accesses = [
+#accesses_0 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>
 ]
 
-#trait = {
-  indexing_maps = #accesses,
+#trait_0 = {
+  indexing_maps = #accesses_0,
   iterator_types = ["parallel", "parallel", "parallel"],
   library_call = "some_external_function_name_1"
 }
 
 func @generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
               %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
-  linalg.generic #trait
+  linalg.generic #trait_0
        ins(%arg0 : memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>)
       outs(%arg1 : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>)
       attrs = {foo = 1} {
@@ -352,7 +357,7 @@ func @generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
 
 func @generic_with_tensor_input(%arg0: tensor<?x?xvector<3x4xi4>>,
                                 %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
-  linalg.generic #trait
+  linalg.generic #trait_0
        ins(%arg0 : tensor<?x?xvector<3x4xi4>>)
       outs(%arg1 : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>)
       attrs = {foo = 1} {
@@ -390,14 +395,14 @@ func @generic_without_inputs(%arg0 : memref<?x?x?xf32>) {
 
 // -----
 
-#accesses2 = [
+#accesses_1 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>,
   affine_map<(i, j, k) -> (i, k, i + j)>
 ]
 
-#trait2 = {
-  indexing_maps = #accesses2,
+#trait_1 = {
+  indexing_maps = #accesses_1,
   iterator_types = ["parallel", "parallel", "parallel"],
   library_call = "some_external_function_name_1"
 }
@@ -405,7 +410,7 @@ func @generic_without_inputs(%arg0 : memref<?x?x?xf32>) {
 func @generic_with_tensor_input_and_output(
     %arg0: tensor<?x?xvector<3x4xi4>>, %arg1: tensor<?x?x?xf32>)
     -> (tensor<?x?x?xf32>) {
-  %0 = linalg.generic #trait2
+  %0 = linalg.generic #trait_1
        ins(%arg0, %arg1 : tensor<?x?xvector<3x4xi4>>, tensor<?x?x?xf32>)
       outs(%arg1 : tensor<?x?x?xf32>)
       attrs = {foo = 1} {
@@ -427,14 +432,14 @@ func @generic_with_tensor_input_and_output(
 
 // -----
 
-#accesses3 = [
+#accesses_2 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>,
   affine_map<(i, j, k) -> (i, k, i + j)>
 ]
 
-#trait3 = {
-  indexing_maps = #accesses3,
+#trait_2 = {
+  indexing_maps = #accesses_2,
   iterator_types = ["parallel", "parallel", "parallel"],
   library_call = "some_external_function_name_1"
 }
@@ -442,7 +447,7 @@ func @generic_with_tensor_input_and_output(
 func @indexed_generic_with_tensor_input_and_output(
     %arg0: tensor<?x?xvector<3x4xi4>>, %arg1: tensor<?x?x?xf32>)
     -> (tensor<?x?x?xf32>) {
-  %0 = linalg.indexed_generic #trait3
+  %0 = linalg.indexed_generic #trait_2
        ins(%arg0, %arg1 : tensor<?x?xvector<3x4xi4>>, tensor<?x?x?xf32>)
       outs(%arg1 : tensor<?x?x?xf32>)
       attrs = {foo = 1} {
@@ -499,23 +504,21 @@ func @indexed_generic_op_zero_rank(%arg0: tensor<f32>, %arg1 : tensor<3x4xf32>)
 
 // -----
 
-// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
-#accesses = [
+#accesses_3 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>
 ]
 
-#trait3 = {
-  indexing_maps = #accesses,
+#trait_3 = {
+  indexing_maps = #accesses_3,
   iterator_types = ["parallel", "parallel", "parallel"],
   library_call = "some_external_function_name_2"
 }
 
 func @generic_region(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
                      %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
-  linalg.generic #trait3
+  linalg.generic #trait_3
        ins(%arg0 : memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>)
       outs(%arg1 : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>)
       attrs = {foo = 1} {
@@ -537,7 +540,7 @@ func @generic_region(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1
 
 func @indexed_generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
                       %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
-  linalg.indexed_generic #trait3
+  linalg.indexed_generic #trait_3
        ins(%arg0 : memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>)
       outs(%arg1 : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>)
       attrs = {foo = 1} {
@@ -560,15 +563,6 @@ func @indexed_generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?,
 
 // -----
 
-// CHECK-DAG: #[[$reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[$reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
-// CHECK-DAG: #[[$reshapeD0:.*]] = affine_map<(d0, d1, d2) -> (d0)>
-// CHECK-DAG: #[[$reshapeD12:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
-// CHECK-DAG: #[[$reshapeD012:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-DAG: #[[$reshape5D01:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-// CHECK-DAG: #[[$reshape5D2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
-// CHECK-DAG: #[[$reshape5D34:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
-
 func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>, %arg2: tensor<3x?x5xf32>) {
   // Reshapes that collapse and expand back a contiguous buffer.
   %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>,
@@ -640,11 +634,6 @@ func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>, %arg2:
 
 // -----
 
-// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[$strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[$strided3DOFF0:.*]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)>
-
 func @reshape_dynamic(%arg0: memref<?x?x?xf32>,
                       %arg1: memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]>,
                       %arg2: memref<?x?x?xf32, offset : ?, strides : [?, ?, 1]>) {
@@ -673,9 +662,6 @@ func @reshape_dynamic(%arg0: memref<?x?x?xf32>,
   return
 }
 
-// CHECK-DAG: #[[$reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[$reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
-
 // CHECK-LABEL: func @reshape
 //       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
 //  CHECK-SAME:     memref<?x?x?xf32> into memref<?x?xf32>
@@ -762,11 +748,8 @@ func @legal_collapsing_reshape_dynamic_tensor
     tensor<?x?x?x4x?xf32> into tensor<?x?x?xf32>
   return %0 : tensor<?x?x?xf32>
 }
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d1)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
 //     CHECK: func @legal_collapsing_reshape_dynamic_tensor
-//     CHECK:   linalg.tensor_reshape %{{.+}} [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+//     CHECK:   linalg.tensor_reshape %{{.+}} [#[[$reshape5D0]], #[[$reshape5D1]], #[[$reshape5D345]]]
 
 // -----
 
@@ -780,11 +763,8 @@ func @legal_collapsing_reshape_dynamic_memref
     memref<?x?x?x4x?xf32> into memref<?x?x?xf32>
   return %0 : memref<?x?x?xf32>
 }
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d1)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
 //     CHECK: func @legal_collapsing_reshape_dynamic_memref
-//     CHECK:   linalg.reshape %{{.+}} [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+//     CHECK:   linalg.reshape %{{.+}} [#[[$reshape5D0]], #[[$reshape5D1]], #[[$reshape5D345]]]
 
 // -----
 
@@ -797,14 +777,14 @@ func @fill_tensor(%arg0 : index, %arg1 : index, %arg2 : f32) -> tensor<?x?xf32>
 
 // -----
 
-#accesses = [
+#accesses_4 = [
   affine_map<(i, j) -> (i, j)>,
   affine_map<(i, j) -> (i, j)>,
   affine_map<(i, j) -> (i, j)>
 ]
 
-#trait = {
-  indexing_maps = #accesses,
+#trait_4 = {
+  indexing_maps = #accesses_4,
   iterator_types = ["parallel", "parallel"]
 }
 
@@ -825,7 +805,7 @@ func @tiled_loop(%lhs: tensor<24x64xi8>, %rhs: tensor<24x64xi8>,
     %out_sub = subtensor %out[%i, 0] [%c4, %c64] [1, 1]
         : tensor<24x64xi8> to tensor<?x?xi8>
 
-    %sum = linalg.generic #trait
+    %sum = linalg.generic #trait_4
         ins(%lhs_sub, %rhs_sub : tensor<?x?xi8>, tensor<?x?xi8>)
         outs(%out_sub : tensor<?x?xi8>) {
       ^bb(%l: i8, %r: i8, %o: i8) :
@@ -840,7 +820,7 @@ func @tiled_loop(%lhs: tensor<24x64xi8>, %rhs: tensor<24x64xi8>,
   return %prod : tensor<24x64xi8>
 }
 // CHECK-LABEL: func @tiled_loop
-// CHECK-NOT: iterators(
+// CHECK-NOT: iterators[
 
 // -----
 
@@ -848,7 +828,7 @@ func @tiled_loop(%lhs: tensor<24x64xi8>, %rhs: tensor<24x64xi8>,
 #id_2d = affine_map<(d0, d1, d2) -> (d0, d2)>
 #id_1d = affine_map<(d0, d1, d2) -> (d1)>
 
-#trait = {
+#trait_5 = {
   indexing_maps = [
     #id_3d,
     #id_2d,
@@ -874,7 +854,7 @@ func @tiled_loop_reduction(%input_3d: tensor<16x24x32xf32>,
       = (%c0, %c0, %c0) to (%X, %Y, %Z) step (%c2, %c4, %c8)
       ins(%input_3d, %input_2d: tensor<16x24x32xf32>, tensor<16x32xf32>)
       outs( %output: tensor<24xf32>)
-      iterators("reduction", "parallel", "reduction") {
+      iterators["reduction", "parallel", "reduction"] {
     %sub_3d = subtensor %input_3d[%i, %j, %k][2, 4, 8][1, 1, 1]
       : tensor<16x24x32xf32> to tensor<2x4x8xf32>
     %sub_2d = subtensor %input_2d[%i, %k][2, 8][1, 1]
@@ -883,7 +863,7 @@ func @tiled_loop_reduction(%input_3d: tensor<16x24x32xf32>,
       : tensor<24xf32> to tensor<4xf32>
     %sub_out = subtensor %output[%j] [4] [1]
       : tensor<24xf32> to tensor<4xf32>
-    %acc = linalg.generic #trait
+    %acc = linalg.generic #trait_5
       ins(%sub_3d, %sub_2d, %sub_1d
         : tensor<2x4x8xf32>, tensor<2x8xf32>, tensor<4xf32>)
       outs(%sub_out : tensor<4xf32>)  {
@@ -900,4 +880,4 @@ func @tiled_loop_reduction(%input_3d: tensor<16x24x32xf32>,
   return %result : tensor<24xf32>
 }
 // CHECK-LABEL: func @tiled_loop_reduction
-// CHECK: iterators(
+// CHECK: iterators[