[Mlir-commits] [mlir] b98e5e0 - [mlir] Move Linalg tensors-to-buffers tests to Linalg tests.
Alexander Belyaev
llvmlistbot at llvm.org
Mon Oct 12 01:19:37 PDT 2020
Author: Alexander Belyaev
Date: 2020-10-12T10:18:57+02:00
New Revision: b98e5e0f7e99d6b72aa637cc00790b98021e2086
URL: https://github.com/llvm/llvm-project/commit/b98e5e0f7e99d6b72aa637cc00790b98021e2086
DIFF: https://github.com/llvm/llvm-project/commit/b98e5e0f7e99d6b72aa637cc00790b98021e2086.diff
LOG: [mlir] Move Linalg tensors-to-buffers tests to Linalg tests.
The buffer placement preparation tests in
test/Transforms/buffer-placement-preparation* use Linalg as a test
dialect, which leads to confusion and copy-paste maintenance: Linalg is
actively being extended, and when TensorsToBuffers.cpp changes,
TestBufferPlacement is only sometimes kept in sync, a duplication that
should not exist in the first place.
This has already led to an unnoticed bug, because the tests lived in a
different directory and the patterns there had drifted slightly out of sync.
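For illustration, a minimal standalone sketch (plain C++, not the actual MLIR
ValueRange/adaptor API) of the logic the fix introduces in GenericOpConverter:
new block arguments are created only for the output buffers that do not come
from init_tensors, i.e. the first adaptor.init_tensors().size() entries of
newOutputBuffers are skipped. Names below are hypothetical.

// Standalone sketch mirroring the fixed loop: only output buffers that do
// not correspond to init_tensors become new block arguments, i.e. the first
// `numInitTensors` entries of `newOutputBuffers` are dropped.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string>
resultBlockArgBuffers(const std::vector<std::string> &newOutputBuffers,
                      std::size_t numInitTensors) {
  std::vector<std::string> buffers;
  // Stand-in for ValueRange(newOutputBuffers).drop_front(numInitTensors).
  for (std::size_t i = numInitTensors; i < newOutputBuffers.size(); ++i)
    buffers.push_back(newOutputBuffers[i]);
  return buffers;
}

int main() {
  // Hypothetical example: two init_tensor-backed buffers followed by one
  // plain output buffer; only "%buf2" would get a new block argument.
  for (const std::string &b :
       resultBlockArgBuffers({"%init0", "%init1", "%buf2"}, 2))
    std::cout << b << "\n"; // prints: %buf2
}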
Differential Revision: https://reviews.llvm.org/D89209
Added:
Modified:
mlir/lib/Dialect/Linalg/Transforms/TensorsToBuffers.cpp
mlir/test/Dialect/Linalg/tensors-to-buffers.mlir
mlir/test/Transforms/buffer-placement-preparation.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TensorsToBuffers.cpp b/mlir/lib/Dialect/Linalg/Transforms/TensorsToBuffers.cpp
index bb37a2ec10df..442117c619de 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TensorsToBuffers.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TensorsToBuffers.cpp
@@ -183,8 +183,12 @@ class GenericOpConverter
Block *newBlock = rewriter.createBlock(&newRegion, newRegion.begin(),
oldBlock->getArgumentTypes());
- // Add the result arguments to the new block.
- for (Value v : newOutputBuffers)
+ // Add the result arguments that do not come from init_tensors to the new
+ // block.
+ // TODO: update this assumption because the reality is more complex under
+ // linalg on tensor based transformations.
+ for (Value v :
+ ValueRange(newOutputBuffers).drop_front(adaptor.init_tensors().size()))
newBlock->addArgument(v.getType().cast<MemRefType>().getElementType());
// Clone the body of the old block to the new block.
diff --git a/mlir/test/Dialect/Linalg/tensors-to-buffers.mlir b/mlir/test/Dialect/Linalg/tensors-to-buffers.mlir
index 7d714092cb7c..4d23b7e10dae 100644
--- a/mlir/test/Dialect/Linalg/tensors-to-buffers.mlir
+++ b/mlir/test/Dialect/Linalg/tensors-to-buffers.mlir
@@ -173,3 +173,142 @@ func @bar() {
// know that things will play nicely at the C ABI boundary).
func @print_memref_f32(%ptr : tensor<*xf32>)
// CHECK-LABEL: func @print_memref_f32(memref<*xf32>)
+
+// -----
+
+#accesses = [
+ affine_map<(i, j, k) -> (j, i, k)>,
+ affine_map<(i, j, k) -> (i, j)>
+]
+
+#trait = {
+ indexing_maps = #accesses,
+ iterator_types = ["parallel", "parallel", "reduction"]
+}
+
+func @generic_with_init_tensor(%arg0: tensor<2x3x4xvector<3x4xi4>>,
+ %arg1: tensor<3x2xf32>) -> (tensor<3x2xf32>) {
+
+ %0 = linalg.generic #trait
+ ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
+ init(%arg1 : tensor<3x2xf32>) {
+ ^bb(%v0: vector<3x4xi4>, %v1: f32) :
+ %f0 = constant 0.0 : f32
+ linalg.yield %f0 : f32
+ } -> tensor<3x2xf32>
+
+ return %0 : tensor<3x2xf32>
+}
+// CHECK-LABEL: func @generic_with_init_tensor
+// CHECK-SAME: (%[[ARG0:.*]]: memref<2x3x4xvector<3x4xi4>>, %[[ARG1:.*]]: memref<3x2xf32>, %[[RESULT0:.*]]: memref<3x2xf32>) {
+// CHECK-NEXT: linalg.generic
+// CHECK: linalg.copy(%[[ARG1]], %[[RESULT0]])
+// CHECK-NEXT: return
+// CHECK-NOT: %
+
+// -----
+
+#accesses = [
+ affine_map<(i, j, k) -> (j, i, k)>,
+ affine_map<(i, j, k) -> (i, j)>
+]
+
+#trait = {
+ indexing_maps = #accesses,
+ iterator_types = ["parallel", "parallel", "reduction"]
+}
+
+func @init_tensor_with_2_uses(%arg0: tensor<2x3x4xvector<3x4xi4>>,
+ %arg1: tensor<3x2xf32>) -> (tensor<3x2xf32>, tensor<3x2xf32>) {
+
+ %0 = linalg.generic #trait
+ ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
+ init(%arg1 : tensor<3x2xf32>) {
+ ^bb(%v0: vector<3x4xi4>, %v1: f32) :
+ %f0 = constant 0.0 : f32
+ linalg.yield %f0 : f32
+ } -> tensor<3x2xf32>
+
+ %1 = linalg.generic #trait
+ ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
+ init(%arg1 : tensor<3x2xf32>) {
+ ^bb(%v0: vector<3x4xi4>, %v1: f32) :
+ %f0 = constant 0.0 : f32
+ linalg.yield %f0 : f32
+ } -> tensor<3x2xf32>
+
+ return %0, %1 : tensor<3x2xf32>, tensor<3x2xf32>
+}
+// CHECK-LABEL: func @init_tensor_with_2_uses
+// CHECK-SAME: (%[[ARG0:.*]]: memref<2x3x4xvector<3x4xi4>>, %[[ARG1:.*]]: memref<3x2xf32>, %[[RESULT0:.*]]: memref<3x2xf32>, %[[RESULT1:.*]]: memref<3x2xf32>) {
+// CHECK-NEXT: %[[ALLOC0:.*]] = alloc
+// CHECK-NEXT: linalg.copy(%[[ARG1]], %[[ALLOC0]])
+// CHECK-NEXT: linalg.generic
+// CHECK-SAME: outs(%[[ALLOC0]]
+// CHECK-NEXT: ^bb
+// CHECK-NEXT: constant
+// CHECK-NEXT: yield
+// CHECK-NEXT: }
+// CHECK-NEXT: %[[ALLOC1:.*]] = alloc
+// CHECK-NEXT: linalg.copy(%[[ARG1]], %[[ALLOC1]])
+// CHECK-NEXT: linalg.generic
+// CHECK-SAME: outs(%[[ALLOC1]]
+// CHECK-NEXT: ^bb
+// CHECK-NEXT: constant
+// CHECK-NEXT: yield
+// CHECK-NEXT: }
+// CHECK-NEXT: linalg.copy(%[[ALLOC0]], %[[RESULT0]])
+// CHECK-NEXT: dealloc
+// CHECK-NEXT: linalg.copy(%[[ALLOC1]], %[[RESULT1]])
+// CHECK-NEXT: dealloc
+// CHECK-NEXT: return
+// CHECK-NOT: %
+
+// -----
+
+#accesses = [
+ affine_map<(i, j, k) -> (j, i, k)>,
+ affine_map<(i, j, k) -> (i, j)>
+]
+
+#trait = {
+ indexing_maps = #accesses,
+ iterator_types = ["parallel", "parallel", "reduction"]
+}
+
+func @init_tensor_with_1_use_def_chain(%arg0: tensor<2x3x4xvector<3x4xi4>>,
+ %arg1: tensor<3x2xf32>) -> (tensor<3x2xf32>) {
+
+ %0 = linalg.generic #trait
+ ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
+ init(%arg1 : tensor<3x2xf32>) {
+ ^bb(%v0: vector<3x4xi4>, %v1: f32) :
+ %f0 = constant 0.0 : f32
+ linalg.yield %f0 : f32
+ } -> tensor<3x2xf32>
+
+ %1 = linalg.generic #trait
+ ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
+ init(%0 : tensor<3x2xf32>) {
+ ^bb(%v0: vector<3x4xi4>, %v1: f32) :
+ %f0 = constant 0.0 : f32
+ linalg.yield %f0 : f32
+ } -> tensor<3x2xf32>
+
+ return %1 : tensor<3x2xf32>
+}
+// CHECK-LABEL: func @init_tensor_with_1_use_def_chain
+// CHECK-SAME: (%[[ARG0:.*]]: memref<2x3x4xvector<3x4xi4>>, %[[ARG1:.*]]: memref<3x2xf32>, %[[RESULT0:.*]]: memref<3x2xf32>) {
+// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: ^bb
+// CHECK-NEXT: constant
+// CHECK-NEXT: yield
+// CHECK-NEXT: }
+// CHECK-NEXT: linalg.generic
+// CHECK-NEXT: ^bb
+// CHECK-NEXT: constant
+// CHECK-NEXT: yield
+// CHECK-NEXT: }
+// CHECK-NEXT: linalg.copy(%[[ARG1]], %[[RESULT0]])
+// CHECK-NEXT: return
+// CHECK-NOT: %
diff --git a/mlir/test/Transforms/buffer-placement-preparation.mlir b/mlir/test/Transforms/buffer-placement-preparation.mlir
index ac3ec1246211..4fcd225abc7e 100644
--- a/mlir/test/Transforms/buffer-placement-preparation.mlir
+++ b/mlir/test/Transforms/buffer-placement-preparation.mlir
@@ -382,141 +382,3 @@ func @decompose_tuple_typed_function_args_and_results(%arg0: tuple<i1,f32>, %arg
// CHECK-NEXT: linalg.copy(%[[SECOND_TUPLE_SECOND_ELEM]], %[[RESULT0]])
// CHECK-NEXT: linalg.copy(%[[ARG2]], %[[RESULT1]])
// CHECK-NEXT: return %[[SECOND_TUPLE_FIRST_ELEM]], %[[FIRST_TUPLE_FIRST_ELEM]], %[[FIRST_TUPLE_SECOND_ELEM]]
-
-// -----
-
-#accesses = [
- affine_map<(i, j, k) -> (j, i, k)>,
- affine_map<(i, j, k) -> (i, j)>
-]
-
-#trait = {
- indexing_maps = #accesses,
- iterator_types = ["parallel", "parallel", "reduction"]
-}
-
-func @generic_with_init_tensor(
- %arg0: tensor<2x3x4xvector<3x4xi4>>, %arg1: tensor<3x2xf32>) -> (tensor<3x2xf32>) {
-
- %0 = linalg.generic #trait
- ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
- init(%arg1 : tensor<3x2xf32>) {
- ^bb(%v0: vector<3x4xi4>, %v1: f32) :
- %f0 = constant 0.0 : f32
- linalg.yield %f0 : f32
- } -> tensor<3x2xf32>
-
- return %0 : tensor<3x2xf32>
-}
-// CHECK-LABEL: func @generic_with_init_tensor
-// CHECK-SAME: (%[[ARG0:.*]]: memref<2x3x4xvector<3x4xi4>>, %[[ARG1:.*]]: memref<3x2xf32>, %[[RESULT0:.*]]: memref<3x2xf32>) {
-// CHECK-NEXT: linalg.generic
-// CHECK: linalg.copy(%[[ARG1]], %[[RESULT0]])
-// CHECK-NEXT: return
-// CHECK-NOT: %
-
-// -----
-
-#accesses = [
- affine_map<(i, j, k) -> (j, i, k)>,
- affine_map<(i, j, k) -> (i, j)>
-]
-
-#trait = {
- indexing_maps = #accesses,
- iterator_types = ["parallel", "parallel", "reduction"]
-}
-
-func @init_tensor_with_2_uses(
- %arg0: tensor<2x3x4xvector<3x4xi4>>, %arg1: tensor<3x2xf32>) -> (tensor<3x2xf32>, tensor<3x2xf32>) {
-
- %0 = linalg.generic #trait
- ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
- init(%arg1 : tensor<3x2xf32>) {
- ^bb(%v0: vector<3x4xi4>, %v1: f32) :
- %f0 = constant 0.0 : f32
- linalg.yield %f0 : f32
- } -> tensor<3x2xf32>
-
- %1 = linalg.generic #trait
- ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
- init(%arg1 : tensor<3x2xf32>) {
- ^bb(%v0: vector<3x4xi4>, %v1: f32) :
- %f0 = constant 0.0 : f32
- linalg.yield %f0 : f32
- } -> tensor<3x2xf32>
-
- return %0, %1 : tensor<3x2xf32>, tensor<3x2xf32>
-}
-// CHECK-LABEL: func @init_tensor_with_2_uses
-// CHECK-SAME: (%[[ARG0:.*]]: memref<2x3x4xvector<3x4xi4>>, %[[ARG1:.*]]: memref<3x2xf32>, %[[RESULT0:.*]]: memref<3x2xf32>, %[[RESULT1:.*]]: memref<3x2xf32>) {
-// CHECK-NEXT: %[[ALLOC0:.*]] = alloc
-// CHECK-NEXT: linalg.copy(%[[ARG1]], %[[ALLOC0]])
-// CHECK-NEXT: linalg.generic
-// CHECK-SAME: outs(%[[ALLOC0]]
-// CHECK-NEXT: ^bb
-// CHECK-NEXT: constant
-// CHECK-NEXT: yield
-// CHECK-NEXT: }
-// CHECK-NEXT: %[[ALLOC1:.*]] = alloc
-// CHECK-NEXT: linalg.copy(%[[ARG1]], %[[ALLOC1]])
-// CHECK-NEXT: linalg.generic
-// CHECK-SAME: outs(%[[ALLOC1]]
-// CHECK-NEXT: ^bb
-// CHECK-NEXT: constant
-// CHECK-NEXT: yield
-// CHECK-NEXT: }
-// CHECK-NEXT: linalg.copy(%[[ALLOC0]], %[[RESULT0]])
-// CHECK-NEXT: linalg.copy(%[[ALLOC1]], %[[RESULT1]])
-// CHECK-NEXT: return
-// CHECK-NOT: %
-
-// -----
-
-#accesses = [
- affine_map<(i, j, k) -> (j, i, k)>,
- affine_map<(i, j, k) -> (i, j)>
-]
-
-#trait = {
- indexing_maps = #accesses,
- iterator_types = ["parallel", "parallel", "reduction"]
-}
-
-func @init_tensor_with_1_use_def_chain(
- %arg0: tensor<2x3x4xvector<3x4xi4>>, %arg1: tensor<3x2xf32>) -> (tensor<3x2xf32>) {
-
- %0 = linalg.generic #trait
- ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
- init(%arg1 : tensor<3x2xf32>) {
- ^bb(%v0: vector<3x4xi4>, %v1: f32) :
- %f0 = constant 0.0 : f32
- linalg.yield %f0 : f32
- } -> tensor<3x2xf32>
-
- %1 = linalg.generic #trait
- ins(%arg0 : tensor<2x3x4xvector<3x4xi4>>)
- init(%0 : tensor<3x2xf32>) {
- ^bb(%v0: vector<3x4xi4>, %v1: f32) :
- %f0 = constant 0.0 : f32
- linalg.yield %f0 : f32
- } -> tensor<3x2xf32>
-
- return %1 : tensor<3x2xf32>
-}
-// CHECK-LABEL: func @init_tensor_with_1_use_def_chain
-// CHECK-SAME: (%[[ARG0:.*]]: memref<2x3x4xvector<3x4xi4>>, %[[ARG1:.*]]: memref<3x2xf32>, %[[RESULT0:.*]]: memref<3x2xf32>) {
-// CHECK-NEXT: linalg.generic
-// CHECK-NEXT: ^bb
-// CHECK-NEXT: constant
-// CHECK-NEXT: yield
-// CHECK-NEXT: }
-// CHECK-NEXT: linalg.generic
-// CHECK-NEXT: ^bb
-// CHECK-NEXT: constant
-// CHECK-NEXT: yield
-// CHECK-NEXT: }
-// CHECK-NEXT: linalg.copy(%[[ARG1]], %[[RESULT0]])
-// CHECK-NEXT: return
-// CHECK-NOT: %
-