[Mlir-commits] [mlir] be9c3bd - [MLIR] Fix fusion of linalg.indexed_generic producer into tiled (Indexed)GenericOp.
Alexander Belyaev
llvmlistbot at llvm.org
Thu Apr 16 01:45:46 PDT 2020
Author: Alexander Belyaev
Date: 2020-04-16T10:45:17+02:00
New Revision: be9c3bdc44baddfd1ed0efeb4db249198a21b20d
URL: https://github.com/llvm/llvm-project/commit/be9c3bdc44baddfd1ed0efeb4db249198a21b20d
DIFF: https://github.com/llvm/llvm-project/commit/be9c3bdc44baddfd1ed0efeb4db249198a21b20d.diff
LOG: [MLIR] Fix fusion of linalg.indexed_generic producer into tiled (Indexed)GenericOp.
Differential Revision: https://reviews.llvm.org/D78209
Added:
mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir
Modified:
mlir/include/mlir/Transforms/LoopUtils.h
mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
mlir/lib/Transforms/Utils/LoopUtils.cpp
mlir/test/Dialect/Linalg/fusion.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Transforms/LoopUtils.h b/mlir/include/mlir/Transforms/LoopUtils.h
index 3bc34f1444e0..1d0e8d39bd61 100644
--- a/mlir/include/mlir/Transforms/LoopUtils.h
+++ b/mlir/include/mlir/Transforms/LoopUtils.h
@@ -287,6 +287,11 @@ LogicalResult
separateFullTiles(MutableArrayRef<AffineForOp> nest,
SmallVectorImpl<AffineForOp> *fullTileNest = nullptr);
+/// Replaces all uses of `orig` with `replacement` except if the user is listed
+/// in `exceptions`.
+void replaceAllUsesExcept(Value orig, Value replacement,
+ const SmallPtrSetImpl<Operation *> &exceptions);
+
} // end namespace mlir
#endif // MLIR_TRANSFORMS_LOOP_UTILS_H
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 5c3763523cdd..96cbdab5ac47 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -24,6 +24,7 @@
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
+#include "mlir/Transforms/LoopUtils.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -97,7 +98,26 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
}
auto operands = getAssumedNonViewOperands(op);
clonedViews.append(operands.begin(), operands.end());
- return op.clone(b, loc, clonedViews);
+
+ Operation *clonedOp = op.clone(b, loc, clonedViews);
+ // When the producer is an IndexedGenericOp, we have to transform its block
+ // IV arguments according to the tiling of the consumer, i.e. offset them by
+ // the values computed in `loopRanges`.
+ if (auto indexedGenericOp = dyn_cast<IndexedGenericOp>(clonedOp)) {
+ auto &block = indexedGenericOp.region().front();
+
+ OpBuilder::InsertionGuard g(b);
+ b.setInsertionPointToStart(&block);
+ for (unsigned i = 0, e = indexedGenericOp.getNumLoops(); i < e; ++i) {
+ Value oldIndex = block.getArgument(i);
+ Value newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
+ loopRanges[i].offset);
+ replaceAllUsesExcept(
+ oldIndex, newIndex,
+ SmallPtrSet<Operation *, 1>{newIndex.getDefiningOp()});
+ }
+ }
+ return clonedOp;
}
struct ViewDimension {
@@ -284,10 +304,6 @@ fuseProducerOfDep(OpBuilder &b, LinalgOp consumer, unsigned consumerIdx,
LLVM_DEBUG(dbgs() << "\n***Consider producer:\t"
<< *dependence.dependentOpView.op << "\n");
auto producer = cast<LinalgOp>(dependence.dependentOpView.op);
- if (isa<linalg::IndexedGenericOp>(dependence.dependentOpView.op)) {
- LLVM_DEBUG(dbgs() << "Not fusing indexed_generic producer");
- continue;
- }
// Check that the dependence is indeed on the input `consumerIdx` view.
auto consumedView = dependence.indexingView;
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 3aebc83678f7..c03cf6cfc282 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -1158,17 +1158,6 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp,
return tileLoops;
}
-// Replaces all uses of `orig` with `replacement` except if the user is listed
-// in `exceptions`.
-static void
-replaceAllUsesExcept(Value orig, Value replacement,
- const SmallPtrSetImpl<Operation *> &exceptions) {
- for (auto &use : llvm::make_early_inc_range(orig.getUses())) {
- if (exceptions.count(use.getOwner()) == 0)
- use.set(replacement);
- }
-}
-
/// Return the new lower bound, upper bound, and step in that order. Insert any
/// additional bounds calculations before the given builder and any additional
/// conversion back to the original loop induction value inside the given Block.
@@ -2382,3 +2371,12 @@ mlir::separateFullTiles(MutableArrayRef<AffineForOp> inputNest,
return success();
}
+
+void mlir::replaceAllUsesExcept(
+ Value orig, Value replacement,
+ const SmallPtrSetImpl<Operation *> &exceptions) {
+ for (auto &use : llvm::make_early_inc_range(orig.getUses())) {
+ if (exceptions.count(use.getOwner()) == 0)
+ use.set(replacement);
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/fusion.mlir b/mlir/test/Dialect/Linalg/fusion.mlir
index 82ef196d0d97..14a12840d1d0 100644
--- a/mlir/test/Dialect/Linalg/fusion.mlir
+++ b/mlir/test/Dialect/Linalg/fusion.mlir
@@ -604,111 +604,6 @@ func @pointwise_no_view(%M: index, %N: index) {
// CHECK: linalg.generic
// CHECK: mulf
-// -----
-
-#map5 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-#map6 = affine_map<(d0, d1) -> (d0, d1)>
-#id_2d = affine_map<(i, j) -> (i, j)>
-#pointwise_2d_trait = {
- args_in = 2,
- args_out = 1,
- indexing_maps = [#id_2d, #id_2d, #id_2d],
- iterator_types = ["parallel", "parallel"]
-}
-func @indexed_generic_test(%A: memref<?x?xf32>,
- %B: memref<?x?xf32>,
- %C: memref<?x?xf32>,
- %D: memref<?x?xf32>) {
- linalg.generic #pointwise_2d_trait %A, %B, %C {
- ^bb0(%e: f32, %arg5: f32, %arg6: f32): // no predecessors
- %2 = addf %e, %arg5 : f32
- linalg.yield %2 : f32
- }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
- %c1 = constant 1 : index
- %c0 = constant 0 : index
- %c25 = constant 25 : index
- %c10 = constant 10 : index
- %0 = dim %C, 0 : memref<?x?xf32>
- %1 = dim %C, 1 : memref<?x?xf32>
- %2 = dim %D, 0 : memref<?x?xf32>
- %3 = dim %D, 1 : memref<?x?xf32>
- loop.for %arg2 = %c0 to %0 step %c10 {
- loop.for %arg3 = %c0 to %1 step %c25 {
- %4 = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
- memref<?x?xf32> to memref<?x?xf32, #map5>
- %5 = std.subview %D[%arg2, %arg3][%c10, %c25][%c1, %c1] :
- memref<?x?xf32> to memref<?x?xf32, #map5>
- linalg.indexed_generic {
- indexing_maps = [#map6, #map6],
- iterator_types = ["parallel", "parallel"],
- args_in = 1,
- args_out = 1
- } %4, %5 {
- ^bb0(%arg4: index, %arg5: index, %arg6: f32, %arg7: f32):
- %6 = addi %arg4, %arg2 : index
- %7 = addi %arg5, %arg3 : index
- %8 = index_cast %6 : index to i32
- %9 = sitofp %8 : i32 to f32
- %10 = index_cast %7 : index to i32
- %11 = sitofp %10 : i32 to f32
- %12 = addf %9, %11 : f32
- linalg.yield %12 : f32
- }: memref<?x?xf32, #map5>, memref<?x?xf32, #map5>
- }
- }
- return
-}
-// CHECK-LABEL: func @indexed_generic_test
-// CHECK: loop.for
-// CHECK: loop.for
-// CHECK-NOT: loop.for
-// CHECK: linalg.generic
-// CHECK: addf
-// CHECK: linalg.indexed_generic
-// CHECK: index_cast
-
-// -----
-
-//
-// We should not be fusing indexed_generic into a generic yet.
-// https://bugs.llvm.org/show_bug.cgi?id=44875
-//
-
-#map0 = affine_map<(d0)[s0,s1] -> (d0 * s1 + s0)>
-#pointwise_map = affine_map<(d0) -> (d0)>
-#pointwise_1d_trait = {
- args_in = 1,
- args_out = 1,
- indexing_maps = [#pointwise_map, #pointwise_map],
- iterator_types = ["parallel"]
-}
-
-func @nofuse_indexed_generic(%A: memref<?xf32>, %B: memref<?xf32>, %C: memref<?xf32>) {
- linalg.indexed_generic #pointwise_1d_trait %A, %B {
- ^bb0(%i: index, %a: f32, %b: f32):
- linalg.yield %a : f32
- }: memref<?xf32>, memref<?xf32>
-
- %c0 = constant 0 : index
- %c1 = constant 1 : index
- %c10 = constant 10 : index
- %dB = dim %B, 0 : memref<?xf32>
- loop.for %i = %c0 to %dB step %c10 {
- %subB = subview %B[%i][%c10][%c1] : memref<?xf32> to memref<?xf32, #map0>
- %subC = subview %C[%i][%c10][%c1] : memref<?xf32> to memref<?xf32, #map0>
- linalg.generic #pointwise_1d_trait %subB, %subC {
- ^bb0(%b: f32, %c: f32):
- linalg.yield %b : f32
- }: memref<?xf32, #map0>, memref<?xf32, #map0>
- }
- return
-}
-// CHECK-LABEL: func @nofuse_indexed_generic
-// CHECK-NOT: loop.for
-// CHECK: linalg.indexed_generic
-// CHECK: loop.for
-// CHECK-NOT: linalg.indexed_generic
-// CHECK: linalg.generic
// -----
diff --git a/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir b/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir
new file mode 100644
index 000000000000..eaef27b2f3de
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir
@@ -0,0 +1,186 @@
+// RUN: mlir-opt %s -linalg-fusion -split-input-file | FileCheck %s --dump-input-on-failure
+
+#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+#id_2d = affine_map<(d0, d1) -> (d0, d1)>
+#pointwise_2d_trait = {
+ args_in = 2,
+ args_out = 1,
+ indexing_maps = [#id_2d, #id_2d, #id_2d],
+ iterator_types = ["parallel", "parallel"]
+}
+func @fuse_indexed_generic_consumer(%A: memref<?x?xf32>,
+ %B: memref<?x?xf32>,
+ %C: memref<?x?xf32>,
+ %D: memref<?x?xf32>) {
+ linalg.generic #pointwise_2d_trait %A, %B, %C {
+ ^bb0(%e: f32, %arg5: f32, %arg6: f32): // no predecessors
+ %2 = addf %e, %arg5 : f32
+ linalg.yield %2 : f32
+ }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
+ %c1 = constant 1 : index
+ %c0 = constant 0 : index
+ %c25 = constant 25 : index
+ %c10 = constant 10 : index
+ %0 = dim %C, 0 : memref<?x?xf32>
+ %1 = dim %C, 1 : memref<?x?xf32>
+ %2 = dim %D, 0 : memref<?x?xf32>
+ %3 = dim %D, 1 : memref<?x?xf32>
+ loop.for %arg2 = %c0 to %0 step %c10 {
+ loop.for %arg3 = %c0 to %1 step %c25 {
+ %4 = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+ memref<?x?xf32> to memref<?x?xf32, #map>
+ %5 = std.subview %D[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+ memref<?x?xf32> to memref<?x?xf32, #map>
+ linalg.indexed_generic {
+ indexing_maps = [#id_2d, #id_2d],
+ iterator_types = ["parallel", "parallel"],
+ args_in = 1,
+ args_out = 1
+ } %4, %5 {
+ ^bb0(%arg4: index, %arg5: index, %arg6: f32, %arg7: f32):
+ %6 = addi %arg4, %arg2 : index
+ %7 = addi %arg5, %arg3 : index
+ %8 = index_cast %6 : index to i32
+ %9 = sitofp %8 : i32 to f32
+ %10 = index_cast %7 : index to i32
+ %11 = sitofp %10 : i32 to f32
+ %12 = addf %9, %11 : f32
+ linalg.yield %12 : f32
+ }: memref<?x?xf32, #map>, memref<?x?xf32, #map>
+ }
+ }
+ return
+}
+// CHECK-LABEL: func @fuse_indexed_generic_consumer
+// CHECK: loop.for
+// CHECK: loop.for
+// CHECK-NOT: loop.for
+// CHECK: linalg.generic
+// CHECK-NOT: addi
+// CHECK: addf
+// CHECK: linalg.indexed_generic
+// CHECK: index_cast
+
+// -----
+
+#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+#id_2d = affine_map<(d0, d1) -> (d0, d1)>
+#pointwise_2d_trait = {
+ args_in = 2,
+ args_out = 1,
+ indexing_maps = [#id_2d, #id_2d, #id_2d],
+ iterator_types = ["parallel", "parallel"]
+}
+func @fuse_indexed_generic_producer(%A: memref<?x?xf32>,
+ %B: memref<?x?xf32>,
+ %C: memref<?x?xf32>,
+ %D: memref<?x?xf32>) {
+ %c1 = constant 1 : index
+ %c0 = constant 0 : index
+ %c25 = constant 25 : index
+ %c10 = constant 10 : index
+ linalg.indexed_generic #pointwise_2d_trait %A, %B, %C {
+ ^bb0(%i: index, %j: index, %a: f32, %b: f32, %c: f32): // no predecessors
+ %i_int = index_cast %i: index to i32
+ %i_float = sitofp %i_int : i32 to f32
+ %ab = addf %a, %b : f32
+ %out = addf %ab, %i_float : f32
+ linalg.yield %out : f32
+ }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
+ %C_X = dim %C, 0 : memref<?x?xf32>
+ %C_Y = dim %C, 1 : memref<?x?xf32>
+ %D_X = dim %D, 0 : memref<?x?xf32>
+ %D_Y = dim %D, 1 : memref<?x?xf32>
+ loop.parallel (%arg2, %arg3) = (%c0, %c0) to (%C_X, %C_Y) step (%c10, %c25) {
+ %C_view = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+ memref<?x?xf32> to memref<?x?xf32, #map>
+ %D_view = std.subview %D[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+ memref<?x?xf32> to memref<?x?xf32, #map>
+ linalg.generic {
+ indexing_maps = [#id_2d, #id_2d],
+ iterator_types = ["parallel", "parallel"],
+ args_in = 1,
+ args_out = 1
+ } %C_view, %D_view {
+ ^bb0( %a: f32, %b: f32):
+ %ab = addf %a, %b : f32
+ linalg.yield %ab : f32
+ }: memref<?x?xf32, #map>, memref<?x?xf32, #map>
+ }
+ return
+}
+// CHECK-LABEL: func @fuse_indexed_generic_producer
+// CHECK: loop.parallel ([[I:%.*]], [[J:%.*]]) =
+// CHECK-NOT: loop.parallel
+// CHECK: linalg.indexed_generic
+// CHECK: ^bb0([[i:%.*]]: index, [[j:%.*]]: index
+// CHECK: [[i_new:%.*]] = addi [[i]], [[I]] : index
+// CHECK: [[j_new:%.*]] = addi [[j]], [[J]] : index
+// CHECK: {{.*}} = index_cast [[i_new]] : index to i32
+// CHECK: linalg.generic
+// CHECK: addf
+
+// -----
+
+#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+#id_2d = affine_map<(d0, d1) -> (d0, d1)>
+#pointwise_2d_trait = {
+ args_in = 2,
+ args_out = 1,
+ indexing_maps = [#id_2d, #id_2d, #id_2d],
+ iterator_types = ["parallel", "parallel"]
+}
+func @fuse_indexed_generic_producer_tile_second_dim_only(%A: memref<?x?xf32>,
+ %B: memref<?x?xf32>,
+ %C: memref<?x?xf32>,
+ %D: memref<?x?xf32>) {
+ %c1 = constant 1 : index
+ %c3 = constant 3 : index
+ %c0 = constant 0 : index
+ linalg.indexed_generic #pointwise_2d_trait %A, %B, %C {
+ ^bb0(%i: index, %j: index, %a: f32, %b: f32, %c: f32): // no predecessors
+ %j_int = index_cast %j: index to i32
+ %j_float = sitofp %j_int : i32 to f32
+ %ab = addf %a, %b : f32
+ %out = addf %ab, %j_float : f32
+ linalg.yield %out : f32
+ }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
+ %C_X = dim %C, 0 : memref<?x?xf32>
+ %C_Y = dim %C, 1 : memref<?x?xf32>
+ %D_X = dim %D, 0 : memref<?x?xf32>
+ %D_Y = dim %D, 1 : memref<?x?xf32>
+ %3 = linalg.range %c0 : %C_Y : %c3 : !linalg.range
+ loop.parallel (%j) = (%c0) to (%C_Y) step (%c3) {
+ %0 = affine.min affine_map<(d0, d1, d2) -> (d0, d1 - d2)>(%c3, %C_Y, %j)
+ %C_view = subview %C[%c0, %j] [%C_X, %0] [%c1, %c1] :
+ memref<?x?xf32> to memref<?x?xf32, #map>
+
+ %1 = affine.min affine_map<(d0, d1, d2) -> (d0, d1 - d2)>(%c3, %D_Y, %j)
+ %D_view = subview %D[%c0, %j] [%D_X, %1] [%c1, %c1] :
+ memref<?x?xf32> to memref<?x?xf32, #map>
+
+ linalg.generic {
+ indexing_maps = [#id_2d, #id_2d],
+ iterator_types = ["parallel", "parallel"],
+ args_in = 1,
+ args_out = 1
+ } %C_view, %D_view {
+ ^bb0( %a: f32, %b: f32):
+ %ab = addf %a, %b : f32
+ linalg.yield %ab : f32
+ }: memref<?x?xf32, #map>, memref<?x?xf32, #map>
+ loop.yield
+ }
+ return
+}
+// CHECK-LABEL: func @fuse_indexed_generic_producer_tile_second_dim_only
+// CHECK: [[C0:%.*]] = constant 0 : index
+// CHECK: loop.parallel ([[J:%.*]]) =
+// CHECK-NOT: loop.parallel
+// CHECK: linalg.indexed_generic
+// CHECK: ^bb0([[i:%.*]]: index, [[j:%.*]]: index
+// CHECK: [[i_new:%.*]] = addi [[i]], [[C0]] : index
+// CHECK: [[j_new:%.*]] = addi [[j]], [[J]] : index
+// CHECK: {{.*}} = index_cast [[j_new]] : index to i32
+// CHECK: linalg.generic
+// CHECK: addf
More information about the Mlir-commits
mailing list