[Mlir-commits] [mlir] 2e9abc0 - [mlir] drop unnecessary transform.with_pdl_patterns from tests, NFC
Alex Zinenko
llvmlistbot at llvm.org
Tue Oct 11 05:26:21 PDT 2022
Author: Alex Zinenko
Date: 2022-10-11T12:26:11Z
New Revision: 2e9abc0c714a97b0412de8b09b74735fafa4432b
URL: https://github.com/llvm/llvm-project/commit/2e9abc0c714a97b0412de8b09b74735fafa4432b
DIFF: https://github.com/llvm/llvm-project/commit/2e9abc0c714a97b0412de8b09b74735fafa4432b.diff
LOG: [mlir] drop unnecessary transform.with_pdl_patterns from tests, NFC
Many tests wrap the transform dialect portion of the IR in
`transform.with_pdl_patterns` without actually using any PDL patterns
inside. Some of these wrappers are leftovers from the migration to
`structured.match`, others are cargo cult; either way they are useless
and pollute the tests.
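For illustration, the representative shape of the change (adapted from the
promote.mlir hunk below): the outer `with_pdl_patterns` region and its block
argument carry no `pdl.pattern` ops, so the nested sequence is hoisted to a
top-level `transform.sequence`.

  // Before: redundant wrapper with no pdl.pattern ops inside.
  transform.with_pdl_patterns {
  ^bb0(%arg0: !pdl.operation):
    sequence %arg0 : !pdl.operation failures(propagate) {
    ^bb0(%arg1: !pdl.operation):
      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
      %1 = transform.structured.promote %0 { use_alloca }
    }
  }

  // After: the sequence stands on its own.
  transform.sequence failures(propagate) {
  ^bb0(%arg1: !pdl.operation):
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
    %1 = transform.structured.promote %0 { use_alloca }
  }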
Reviewed By: guraypp
Differential Revision: https://reviews.llvm.org/D135661
Added:
Modified:
mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
mlir/test/Dialect/GPU/transform-gpu-failing.mlir
mlir/test/Dialect/GPU/transform-gpu.mlir
mlir/test/Dialect/Linalg/multisize-tiling-full.mlir
mlir/test/Dialect/Linalg/promote.mlir
mlir/test/Dialect/Linalg/promotion_options.mlir
mlir/test/Dialect/Linalg/tile-to-foreach-thread.mlir
mlir/test/Dialect/Linalg/transform-op-decompose.mlir
mlir/test/Dialect/Linalg/transform-op-fuse-into-containing.mlir
mlir/test/Dialect/Linalg/transform-op-fuse.mlir
mlir/test/Dialect/Linalg/transform-op-generalize.mlir
mlir/test/Dialect/Linalg/transform-op-interchange.mlir
mlir/test/Dialect/Linalg/transform-op-match.mlir
mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir
mlir/test/Dialect/Linalg/transform-op-pad.mlir
mlir/test/Dialect/Linalg/transform-op-scalarize.mlir
mlir/test/Dialect/Linalg/transform-op-split-reduction-by-scaling.mlir
mlir/test/Dialect/Linalg/transform-op-split-reduction.mlir
mlir/test/Dialect/Linalg/transform-op-split.mlir
mlir/test/Dialect/Linalg/transform-op-tile.mlir
mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
mlir/test/Dialect/Linalg/transform-patterns.mlir
mlir/test/Dialect/Linalg/transform-promotion.mlir
mlir/test/Dialect/Linalg/transform-tile-and-fuse.mlir
mlir/test/Dialect/Linalg/vectorization.mlir
mlir/test/Dialect/SCF/transform-ops.mlir
Removed:
################################################################################
diff --git a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
index edbef42e725e7..151c8e6996319 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
@@ -2,14 +2,11 @@
// Test One-Shot Bufferize.
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["func.func"]} in %arg1
- transform.bufferization.one_shot_bufferize %0
- {target_is_module = false}
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["func.func"]} in %arg1
+ transform.bufferization.one_shot_bufferize %0
+ {target_is_module = false}
}
// CHECK-LABEL: func @test_function(
@@ -34,14 +31,11 @@ func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf3
// Test analysis of One-Shot Bufferize only.
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["func.func"]} in %arg1
- transform.bufferization.one_shot_bufferize %0
- {target_is_module = false, test_analysis_only = true}
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["func.func"]} in %arg1
+ transform.bufferization.one_shot_bufferize %0
+ {target_is_module = false, test_analysis_only = true}
}
// CHECK-LABEL: func @test_function_analysis(
@@ -60,14 +54,11 @@ func.func @test_function_analysis(%A : tensor<?xf32>, %v : vector<4xf32>) -> (te
// Test One-Shot Bufferize transform failure with an unknown op. This would be
// allowed with `allow_unknown_ops`.
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["func.func"]} in %arg1
- // expected-error @+1 {{bufferization failed}}
- transform.bufferization.one_shot_bufferize %0 {target_is_module = false}
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["func.func"]} in %arg1
+ // expected-error @+1 {{bufferization failed}}
+ transform.bufferization.one_shot_bufferize %0 {target_is_module = false}
}
func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
@@ -80,13 +71,10 @@ func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
// Test One-Shot Bufferize transform failure with a module op.
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- // %arg1 is the module
- transform.bufferization.one_shot_bufferize %arg1
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ // %arg1 is the module
+ transform.bufferization.one_shot_bufferize %arg1
}
module {
diff --git a/mlir/test/Dialect/GPU/transform-gpu-failing.mlir b/mlir/test/Dialect/GPU/transform-gpu-failing.mlir
index 41c996ef86970..f61ed8fb418d9 100644
--- a/mlir/test/Dialect/GPU/transform-gpu-failing.mlir
+++ b/mlir/test/Dialect/GPU/transform-gpu-failing.mlir
@@ -44,15 +44,12 @@ func.func @map_nested_foreach_to_threads_excessive_threads(%x: memref<2 x 32 x f
return %y : memref<2 x 32 x f32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
- // expected-error @below {{Trying to launch a GPU kernel with gridDim = (1, 1, 1) blockDim = (1200, 9, 1). It is larger than the limits.}}
- // expected-note @below {{"blockDim" is very large}}
- transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [1200, 9, 1] }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+ %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+ // expected-error @below {{Trying to launch a GPU kernel with gridDim = (1, 1, 1) blockDim = (1200, 9, 1). It is larger than the limits.}}
+ // expected-note @below {{"blockDim" is very large}}
+ transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [1200, 9, 1] }
}
// -----
@@ -88,14 +85,12 @@ func.func @map_nested_foreach_to_threads_fewer_threads(%x: memref<2 x 32 x f32>,
return %y : memref<2 x 32 x f32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
- // expected-error @below {{The requested GPU threads are fewer than the number of loop trip counts. Try to tile scf.foreach_thread before mapping or set small blockDim.}}
- transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
- }
+
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+ %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+ // expected-error @below {{The requested GPU threads are fewer than the number of loop trip counts. Try to tile scf.foreach_thread before mapping or set small blockDim.}}
+ transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
}
// -----
@@ -117,14 +112,11 @@ func.func @map_nested_foreach_to_threads_dynamic_trip_count(%x: memref<2 x 32 x
return %y : memref<2 x 32 x f32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
- // expected-error @below {{unsupported dynamic blockdim size}}
- transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+ %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+ // expected-error @below {{unsupported dynamic blockdim size}}
+ transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
}
// -----
@@ -145,14 +137,11 @@ func.func @map_nested_foreach_to_threads_4d_loop(%x: memref<2x32x32x32xf32>, %y:
return %y : memref<2x32x32x32xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
- // expected-error @below {{scf.foreach_thread with rank > 3 does not lower to gpu.thread_id}}
- transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+ %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+ // expected-error @below {{scf.foreach_thread with rank > 3 does not lower to gpu.thread_id}}
+ transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
}
// -----
@@ -168,16 +157,13 @@ func.func @map_nested_foreach_to_threads_not_buffer(%x: tensor<32x32xf32>, %y: t
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0
- %foreach, %tiled = transform.structured.tile_to_foreach_thread_op %matmul num_threads [10, 20, 30]
- %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
- // expected-error @below {{only bufferized scf.foreach_thread lowers to gpu.thread_id}}
- transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+ %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0
+ %foreach, %tiled = transform.structured.tile_to_foreach_thread_op %matmul num_threads [10, 20, 30]
+ %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+ // expected-error @below {{only bufferized scf.foreach_thread lowers to gpu.thread_id}}
+ transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
}
// -----
diff --git a/mlir/test/Dialect/GPU/transform-gpu.mlir b/mlir/test/Dialect/GPU/transform-gpu.mlir
index 4243b3ee12872..d4ff7ffce8bef 100644
--- a/mlir/test/Dialect/GPU/transform-gpu.mlir
+++ b/mlir/test/Dialect/GPU/transform-gpu.mlir
@@ -30,13 +30,10 @@ func.func @saxpy2dblock(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream
return %y : !type
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
- transform.gpu.map_foreach_to_blocks %funcop { blockDim = [12, 9, 1]}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+ %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+ transform.gpu.map_foreach_to_blocks %funcop { blockDim = [12, 9, 1]}
}
// -----
@@ -87,13 +84,10 @@ func.func @saxpy2d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream : !g
return %y : !type
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
- transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [12, 9, 1] }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+ %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+ transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [12, 9, 1] }
}
// -----
@@ -129,14 +123,11 @@ func.func @saxpy4d(%x: !type4d, %y: !type4d, %alpha : f32) -> !type4d {
return %y : !type4d
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %funcop = transform.structured.match ops{["func.func"]} in %arg0
- %gpuLaunch = transform.gpu.map_foreach_to_blocks %funcop { generate_gpu_launch }
- transform.gpu.map_nested_foreach_to_threads %gpuLaunch { blockDim = [32, 4, 1] }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+ %funcop = transform.structured.match ops{["func.func"]} in %arg0
+ %gpuLaunch = transform.gpu.map_foreach_to_blocks %funcop { generate_gpu_launch }
+ transform.gpu.map_nested_foreach_to_threads %gpuLaunch { blockDim = [32, 4, 1] }
}
// -----
@@ -166,11 +157,8 @@ func.func @saxpy2d_no_barrier(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %
return %y : !type
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
- transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [12, 9, 1], syncAfterDistribute = false }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+ %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+ transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [12, 9, 1], syncAfterDistribute = false }
}
diff --git a/mlir/test/Dialect/Linalg/multisize-tiling-full.mlir b/mlir/test/Dialect/Linalg/multisize-tiling-full.mlir
index 766b7396571be..4cc5ef59fbd36 100644
--- a/mlir/test/Dialect/Linalg/multisize-tiling-full.mlir
+++ b/mlir/test/Dialect/Linalg/multisize-tiling-full.mlir
@@ -1,22 +1,19 @@
// RUN: mlir-opt --test-transform-dialect-interpreter --canonicalize %s | FileCheck %s
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- // This implements a 2D multisize tiling with target sizes [3, 10].
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3}
- %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10}
- %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 }
- %3:2 = transform.structured.tile %2#0 [%1#0]
- %4:2 = transform.structured.tile %2#1 [%1#1]
- %5 = merge_handles %3#0, %4#0 : !pdl.operation
- %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !pdl.operation, !pdl.operation, !pdl.operation, !pdl.operation
- %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 }
- transform.structured.tile %6#0 [0, %tt#0]
- transform.structured.tile %6#1 [0, %tt#1]
- }
+// This implements a 2D multisize tiling with target sizes [3, 10].
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3}
+ %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10}
+ %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 }
+ %3:2 = transform.structured.tile %2#0 [%1#0]
+ %4:2 = transform.structured.tile %2#1 [%1#1]
+ %5 = merge_handles %3#0, %4#0 : !pdl.operation
+ %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !pdl.operation, !pdl.operation, !pdl.operation, !pdl.operation
+ %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 }
+ transform.structured.tile %6#0 [0, %tt#0]
+ transform.structured.tile %6#1 [0, %tt#1]
}
func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index 4a77671c08324..1a384d0d68529 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -66,13 +66,10 @@ func.func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK-NOT: memref.dealloc %[[tmpB]] : memref<48xi8>
// CHECK-NOT: memref.dealloc %[[tmpC]] : memref<24xi8>
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = transform.structured.promote %0 { use_alloca }
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = transform.structured.promote %0 { use_alloca }
}
// -----
@@ -139,16 +136,12 @@ func.func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK: memref.dealloc %[[tmpB_f64]] : memref<96xi8>
// CHECK: memref.dealloc %[[tmpC_f64]] : memref<48xi8>
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = transform.structured.promote %0
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = transform.structured.promote %0
}
-
// -----
#map6 = affine_map<(d0, d1, d2) -> (d0, d2)>
@@ -189,11 +182,8 @@ func.func @promote_rank_reducing_subviews(%arg0: memref<?x?x?x64xf32, strided<[
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match interface{LinalgOp} in %arg1
- %1 = transform.structured.promote %0
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match interface{LinalgOp} in %arg1
+ %1 = transform.structured.promote %0
}
diff --git a/mlir/test/Dialect/Linalg/promotion_options.mlir b/mlir/test/Dialect/Linalg/promotion_options.mlir
index 0ce4fe8e0ee5f..5346d3fedcc6c 100644
--- a/mlir/test/Dialect/Linalg/promotion_options.mlir
+++ b/mlir/test/Dialect/Linalg/promotion_options.mlir
@@ -31,12 +31,9 @@ func.func @gemm(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>
// CHECK: memref.dealloc %[[A0]]
// CHECK: memref.dealloc %[[A1]]
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1, %loops:3 = transform.structured.tile %0 [16, 16, 16]
- %2 = transform.structured.promote %1 { operands_to_promote = [0, 2], force_full_tiles = [false, false] }
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1, %loops:3 = transform.structured.tile %0 [16, 16, 16]
+ %2 = transform.structured.promote %1 { operands_to_promote = [0, 2], force_full_tiles = [false, false] }
}
diff --git a/mlir/test/Dialect/Linalg/tile-to-foreach-thread.mlir b/mlir/test/Dialect/Linalg/tile-to-foreach-thread.mlir
index c81557f4bbc7b..fb5c72c4f7101 100644
--- a/mlir/test/Dialect/Linalg/tile-to-foreach-thread.mlir
+++ b/mlir/test/Dialect/Linalg/tile-to-foreach-thread.mlir
@@ -32,13 +32,10 @@ module {
return %0 : tensor<?x?xf32>
}
- transform.with_pdl_patterns {
- ^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1:2 = transform.structured.tile_to_foreach_thread_op %0 num_threads [10, 20] (mapped to dims [1, 0])
- }
+ transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1:2 = transform.structured.tile_to_foreach_thread_op %0 num_threads [10, 20] (mapped to dims [1, 0])
}
}
@@ -76,13 +73,10 @@ func.func @matmul_static(%A: tensor<100x200xf32>, %B: tensor<200x300xf32>, %C: t
return %0 : tensor<100x300xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1:2 = transform.structured.tile_to_foreach_thread_op %0 num_threads [10, 21]
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1:2 = transform.structured.tile_to_foreach_thread_op %0 num_threads [10, 21]
}
@@ -120,13 +114,10 @@ func.func @matmul_tile_size_dynamic(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C
return %0 : tensor<?x?xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1:2 = transform.structured.tile_to_foreach_thread_op %0 tile_sizes [10, 20]
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1:2 = transform.structured.tile_to_foreach_thread_op %0 tile_sizes [10, 20]
}
// -----
@@ -161,13 +152,10 @@ func.func @matmul_tile_size_static(%A: tensor<100x200xf32>, %B: tensor<200x300xf
return %0 : tensor<100x300xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1:2 = transform.structured.tile_to_foreach_thread_op %0 tile_sizes [10, 21]
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1:2 = transform.structured.tile_to_foreach_thread_op %0 tile_sizes [10, 21]
}
// -----
@@ -186,13 +174,10 @@ module {
return %result : tensor<4xf32>
}
- transform.with_pdl_patterns {
- ^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1:2 = transform.structured.tile_to_foreach_thread_op %0 num_threads [2] (mapped to dims [0])
- }
+ transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1:2 = transform.structured.tile_to_foreach_thread_op %0 num_threads [2] (mapped to dims [0])
}
}
// CHECK-DAG: #[[$map0:.+]] = affine_map<(d0) -> (d0 * 2)>
@@ -240,14 +225,11 @@ func.func @matmul_tile_size_dynamic_dynamic(%A: tensor<?x?xf32>, %B: tensor<?x?x
return %0 : tensor<?x?xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %sz = transform.structured.match ops{["test.dummy"]} in %arg1
- %1:2 = transform.structured.tile_to_foreach_thread_op %0 tile_sizes [%sz, 20]
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %sz = transform.structured.match ops{["test.dummy"]} in %arg1
+ %1:2 = transform.structured.tile_to_foreach_thread_op %0 tile_sizes [%sz, 20]
}
// -----
@@ -298,13 +280,10 @@ transform.with_pdl_patterns {
return %res1, %res2 : tensor<100xf32>, tensor<100xf32>
}
- transform.with_pdl_patterns {
- ^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %foreach_thread, %tiled_generic = transform.structured.tile_to_foreach_thread_op %0 num_threads [7]
- }
+ transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %foreach_thread, %tiled_generic = transform.structured.tile_to_foreach_thread_op %0 num_threads [7]
}
// -----
@@ -355,14 +334,9 @@ transform.with_pdl_patterns {
return %res2, %res3 : tensor<300x100xf32>, tensor<300xf32>
}
- transform.with_pdl_patterns {
- ^bb0(%IN_MAT1: !pdl.operation):
- transform.sequence %IN_MAT1 : !pdl.operation failures(propagate) {
- ^bb1(%IN_MAT2: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %IN_MAT2
- %foreach_thread, %tiled_generic = transform.structured.tile_to_foreach_thread_op %0 num_threads [4]
- }
+ transform.sequence failures(propagate) {
+ ^bb1(%IN_MAT2: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %IN_MAT2
+ %foreach_thread, %tiled_generic = transform.structured.tile_to_foreach_thread_op %0 num_threads [4]
}
-
-
diff --git a/mlir/test/Dialect/Linalg/transform-op-decompose.mlir b/mlir/test/Dialect/Linalg/transform-op-decompose.mlir
index 795fe2ea805a5..81ee39d0c557d 100644
--- a/mlir/test/Dialect/Linalg/transform-op-decompose.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-decompose.mlir
@@ -56,11 +56,8 @@ func.func @depthwise_conv_2d_nhwc_hwc(%input: tensor<1x1x113x96xf32>, %filter: t
return %0: tensor<1x1x56x96xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match interface{LinalgOp} in %arg1
- %1 = transform.structured.decompose %0
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match interface{LinalgOp} in %arg1
+ %1 = transform.structured.decompose %0
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-fuse-into-containing.mlir b/mlir/test/Dialect/Linalg/transform-op-fuse-into-containing.mlir
index 588588f26bbe3..b1af4ef2869be 100644
--- a/mlir/test/Dialect/Linalg/transform-op-fuse-into-containing.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-fuse-into-containing.mlir
@@ -41,16 +41,13 @@ module {
func.func @dummy2() { return }
func.func @dummy3() { return }
- transform.with_pdl_patterns {
- ^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
- %1 = transform.structured.match ops{["scf.foreach_thread"]} in %arg1
-
- // linalg.fill is tileable. The op is tiled and fused.
- transform.structured.fuse_into_containing_op %0 into %1
- }
+ transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
+ %1 = transform.structured.match ops{["scf.foreach_thread"]} in %arg1
+
+ // linalg.fill is tileable. The op is tiled and fused.
+ transform.structured.fuse_into_containing_op %0 into %1
}
}
@@ -87,16 +84,13 @@ module {
func.return %2 : tensor<64xf32>
}
- transform.with_pdl_patterns {
- ^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["tensor.empty"]} in %arg1
- %1 = transform.structured.match ops{["scf.foreach_thread"]} in %arg1
+ transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["tensor.empty"]} in %arg1
+ %1 = transform.structured.match ops{["scf.foreach_thread"]} in %arg1
- // tensor.empty is not tileable. The op is cloned and fused.
- transform.structured.fuse_into_containing_op %0 into %1
- }
+ // tensor.empty is not tileable. The op is cloned and fused.
+ transform.structured.fuse_into_containing_op %0 into %1
}
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-fuse.mlir b/mlir/test/Dialect/Linalg/transform-op-fuse.mlir
index e424e99e82c2a..b8992154a9db5 100644
--- a/mlir/test/Dialect/Linalg/transform-op-fuse.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-fuse.mlir
@@ -15,13 +15,10 @@ func.func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<
return %1 : tensor<?x?xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1
- %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [32, 32], tile_interchange = [0, 1]}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1
+ %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [32, 32], tile_interchange = [0, 1]}
}
// -----
@@ -45,15 +42,12 @@ func.func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<
return %1 : tensor<?x?xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1
- %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [32, 32], tile_interchange = [0, 1]}
- %loop = transform.cast %loops#0 : !pdl.operation to !transform.op<"scf.for">
- transform.loop.peel %loop : (!transform.op<"scf.for">) -> !pdl.operation
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1
+ %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [32, 32], tile_interchange = [0, 1]}
+ %loop = transform.cast %loops#0 : !pdl.operation to !transform.op<"scf.for">
+ transform.loop.peel %loop : (!transform.op<"scf.for">) -> !pdl.operation
}
// -----
@@ -94,12 +88,9 @@ func.func @interchange_reduction(%input: tensor<12x7x25xf32>) -> tensor<12x25xf3
func.return %0 : tensor<12x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [5, 0, 7], tile_interchange = [0, 2, 1]}
- %2, %loops_2 = transform.structured.tile %1 [0, 4]
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [5, 0, 7], tile_interchange = [0, 2, 1]}
+ %2, %loops_2 = transform.structured.tile %1 [0, 4]
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-generalize.mlir b/mlir/test/Dialect/Linalg/transform-op-generalize.mlir
index da00860f12771..b961494393064 100644
--- a/mlir/test/Dialect/Linalg/transform-op-generalize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-generalize.mlir
@@ -10,11 +10,8 @@ func.func @generalize_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> t
return %0 : tensor<?x?xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.elemwise_unary"]} in %arg1
- %1 = transform.structured.generalize %0
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.elemwise_unary"]} in %arg1
+ %1 = transform.structured.generalize %0
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-interchange.mlir b/mlir/test/Dialect/Linalg/transform-op-interchange.mlir
index 094472c53b015..0f3a9fc0d2a38 100644
--- a/mlir/test/Dialect/Linalg/transform-op-interchange.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-interchange.mlir
@@ -18,13 +18,10 @@ func.func @interchange_generic(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -
return %0 : tensor<?x?xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- transform.structured.interchange %0 { iterator_interchange = [1, 0]}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ transform.structured.interchange %0 { iterator_interchange = [1, 0]}
}
// -----
@@ -35,12 +32,9 @@ func.func @interchange_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %a
return %0 : tensor<?x?xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- // expected-error @below {{transform applied to the wrong op kind}}
- transform.structured.interchange %0 { iterator_interchange = [1, 0]}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ // expected-error @below {{transform applied to the wrong op kind}}
+ transform.structured.interchange %0 { iterator_interchange = [1, 0]}
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-match.mlir b/mlir/test/Dialect/Linalg/transform-op-match.mlir
index 22b5822746def..4a92c077d9dde 100644
--- a/mlir/test/Dialect/Linalg/transform-op-match.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-match.mlir
@@ -9,18 +9,15 @@ func.func @bar() {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %match_name = transform.structured.match ops{["arith.constant"]} in %arg1
- transform.test_print_remark_at_operand %match_name, "matched op name" : !pdl.operation
- transform.test_consume_operand %match_name
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %match_name = transform.structured.match ops{["arith.constant"]} in %arg1
+ transform.test_print_remark_at_operand %match_name, "matched op name" : !pdl.operation
+ transform.test_consume_operand %match_name
- %match_attr = transform.structured.match ops{["arith.constant"]} attributes{my_attr} in %arg1
- transform.test_print_remark_at_operand %match_attr, "matched attr name" : !pdl.operation
- transform.test_consume_operand %match_attr
- }
+ %match_attr = transform.structured.match ops{["arith.constant"]} attributes{my_attr} in %arg1
+ transform.test_print_remark_at_operand %match_attr, "matched attr name" : !pdl.operation
+ transform.test_consume_operand %match_attr
}
// -----
@@ -32,15 +29,12 @@ func.func @by_type() {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %match_name = transform.structured.match
- ops{["arith.constant"]} filter_result_type = f32 in %arg1
- transform.test_print_remark_at_operand %match_name, "matched op name" : !pdl.operation
- transform.test_consume_operand %match_name
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %match_name = transform.structured.match
+ ops{["arith.constant"]} filter_result_type = f32 in %arg1
+ transform.test_print_remark_at_operand %match_name, "matched op name" : !pdl.operation
+ transform.test_consume_operand %match_name
}
// -----
@@ -61,21 +55,18 @@ func.func @match_complex_attribute(%arg0: tensor<12x128x32xf32>)
return %1 : tensor<128x12x32xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %match_attr = transform.structured.match
- ops{["linalg.generic"]}
- attributes{iterator_types = ["parallel", "parallel", "parallel"]}
- in %arg1
- transform.test_print_remark_at_operand %match_attr, "matched complex attr" : !pdl.operation
- transform.test_consume_operand %match_attr
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %match_attr = transform.structured.match
+ ops{["linalg.generic"]}
+ attributes{iterator_types = ["parallel", "parallel", "parallel"]}
+ in %arg1
+ transform.test_print_remark_at_operand %match_attr, "matched complex attr" : !pdl.operation
+ transform.test_consume_operand %match_attr
- %no_match = transform.structured.match
- attributes{iterator_types = ["parallel", "parallel", "reduction"]}
- in %arg1
- // expected-remark @below {{0}}
- transform.test_print_number_of_associated_payload_ir_ops %no_match
- }
+ %no_match = transform.structured.match
+ attributes{iterator_types = ["parallel", "parallel", "reduction"]}
+ in %arg1
+// expected-remark @below {{0}}
+ transform.test_print_number_of_associated_payload_ir_ops %no_match
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir b/mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir
index afc8e5572d33d..f2f0a7cf5447d 100644
--- a/mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir
@@ -2,13 +2,10 @@
// CHECK-DAG: #[[$MAP13:.+]] = affine_map<() -> (13)>
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- transform.structured.multitile_sizes %0 { target_size = 3, dimension = 0 }
- }
+transform.sequence failures(propagate) {
+ ^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ transform.structured.multitile_sizes %0 { target_size = 3, dimension = 0 }
}
// CHECK-LABEL: @multitile_sizes_static
@@ -29,13 +26,10 @@ func.func @multitile_sizes_static(
// -----
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- transform.structured.multitile_sizes %0 { target_size = 3, divisor = 2, dimension = 0 }
- }
+transform.sequence failures(propagate) {
+ ^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ transform.structured.multitile_sizes %0 { target_size = 3, divisor = 2, dimension = 0 }
}
// CHECK: #[[$MAP_A:.+]] = affine_map<()[s0] -> ([[A_IMPL:s0 floordiv 2]])>
diff --git a/mlir/test/Dialect/Linalg/transform-op-pad.mlir b/mlir/test/Dialect/Linalg/transform-op-pad.mlir
index fd3ee0eb65194..21eacbd1cf687 100644
--- a/mlir/test/Dialect/Linalg/transform-op-pad.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-pad.mlir
@@ -31,13 +31,10 @@ func.func @static_sizes_output_divisible(%arg0: tensor<24x12xf32>,
func.return %5 : tensor<24x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = transform.structured.pad %0 {padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32], padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 0]}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = transform.structured.pad %0 {padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32], padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 0]}
}
// -----
@@ -50,14 +47,11 @@ func.func @pad(%arg0: tensor<24x12xf32>,
func.return %0 : tensor<24x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- // expected-error @below {{op expects a padding value of type 'f32', got 0 : i32}}
- %1 = transform.structured.pad %0 {padding_values=[0: i32, 0.0 : f32, 0.0 : f32], padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 0]}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ // expected-error @below {{op expects a padding value of type 'f32', got 0 : i32}}
+ %1 = transform.structured.pad %0 {padding_values=[0: i32, 0.0 : f32, 0.0 : f32], padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 0]}
}
// -----
@@ -70,14 +64,11 @@ func.func @pad(%arg0: tensor<24x12xf32>,
func.return %0 : tensor<24x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- // expected-error @below {{expects a padding that parses to 'f32', got "foo"}}
- %1 = transform.structured.pad %0 {padding_values=["foo", 0.0 : f32, 0.0 : f32], padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 0]}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ // expected-error @below {{expects a padding that parses to 'f32', got "foo"}}
+ %1 = transform.structured.pad %0 {padding_values=["foo", 0.0 : f32, 0.0 : f32], padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 0]}
}
// -----
@@ -91,13 +82,10 @@ func.func @pad(%arg0: tensor<24x12xf32>,
func.return %0 : tensor<24x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(suppress) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- // This error is silenceable and is not reported by this transform
- // {{transform.structured.pad failed to apply}}
- %1 = transform.structured.pad %0 {padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32], padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 0]}
- }
+transform.sequence failures(suppress) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ // This error is silenceable and is not reported by this transform
+ // {{transform.structured.pad failed to apply}}
+ %1 = transform.structured.pad %0 {padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32], padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 0]}
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-scalarize.mlir b/mlir/test/Dialect/Linalg/transform-op-scalarize.mlir
index 2537089d63060..89c8d3265373b 100644
--- a/mlir/test/Dialect/Linalg/transform-op-scalarize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-scalarize.mlir
@@ -10,12 +10,9 @@ func.func @scalarize(%arg0: tensor<24x12xf32>,
func.return %0 : tensor<24x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1, %loops = transform.structured.tile %0 [10, 0, 0]
- %2 = transform.structured.scalarize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1, %loops = transform.structured.tile %0 [10, 0, 0]
+ %2 = transform.structured.scalarize %1
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-split-reduction-by-scaling.mlir b/mlir/test/Dialect/Linalg/transform-op-split-reduction-by-scaling.mlir
index a209d5597b896..6631bcf2597ac 100644
--- a/mlir/test/Dialect/Linalg/transform-op-split-reduction-by-scaling.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-split-reduction-by-scaling.mlir
@@ -4,12 +4,12 @@
func.func @matmul_split(%A : tensor<?x256xf32>, %B: tensor<256x32xf32>, %C: tensor<?x32xf32>) -> tensor<?x32xf32> {
// CHECK: bufferization.alloc_tensor({{.*}}) : tensor<?x32x64xf32>
- // CHECK: linalg.generic
+ // CHECK: linalg.generic
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
// CHECK-SAME: ins(%{{[a-zA-Z0-9]*}}, %{{[a-zA-Z0-9]*}}, %{{[a-zA-Z0-9]*}} : tensor<?x256xf32>, tensor<256x32xf32>, tensor<64x4xi1>)
// CHECK-SAME: outs(%{{[a-zA-Z0-9]*}} : tensor<?x32x64xf32>) {
- // CHECK: linalg.generic
+ // CHECK: linalg.generic
// CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"]
// CHECK-SAME: ins(%{{[a-zA-Z0-9]*}} : tensor<?x32x64xf32>)
// CHECK-SAME: outs(%{{[a-zA-Z0-9]*}} : tensor<?x32xf32>) {
@@ -18,12 +18,9 @@ func.func @matmul_split(%A : tensor<?x256xf32>, %B: tensor<256x32xf32>, %C: tens
return %0: tensor<?x32xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1:4 = transform.structured.split_reduction %0
- { split_factor = 4, insert_split_dimension = 2, use_scaling_algorithm, use_alloc}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1:4 = transform.structured.split_reduction %0
+ { split_factor = 4, insert_split_dimension = 2, use_scaling_algorithm, use_alloc}
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-split-reduction.mlir b/mlir/test/Dialect/Linalg/transform-op-split-reduction.mlir
index 24ffc30d3ae18..459468a6c59f3 100644
--- a/mlir/test/Dialect/Linalg/transform-op-split-reduction.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-split-reduction.mlir
@@ -3,12 +3,12 @@
// CHECK-LABEL: func.func @matmul_split
func.func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
- // CHECK: linalg.generic
+ // CHECK: linalg.generic
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
// CHECK-SAME: ins(%{{[a-zA-Z0-9_]*}}, %{{[a-zA-Z0-9_]*}} : tensor<16x4x64xf32>, tensor<4x64x32xf32>)
// CHECK-SAME: outs(%{{[a-zA-Z0-9_]*}} : tensor<16x32x4xf32>) {
- // CHECK: linalg.generic
+ // CHECK: linalg.generic
// CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"]
// CHECK-SAME: ins(%{{[a-zA-Z0-9_]*}} : tensor<16x32x4xf32>)
// CHECK-SAME: outs(%{{[a-zA-Z0-9_]*}} : tensor<16x32xf32>) {
@@ -17,11 +17,8 @@ func.func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: ten
return %0: tensor<16x32xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2}
}
diff --git a/mlir/test/Dialect/Linalg/transform-op-split.mlir b/mlir/test/Dialect/Linalg/transform-op-split.mlir
index d6dff8962a224..ae1ee45c96825 100644
--- a/mlir/test/Dialect/Linalg/transform-op-split.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-split.mlir
@@ -1,12 +1,9 @@
// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file -verify-diagnostics | FileCheck %s
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1:2 = transform.structured.split %0 after 42 { dimension = 0 }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1:2 = transform.structured.split %0 after 42 { dimension = 0 }
}
func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
@@ -74,14 +71,11 @@ func.func @one_d_static_overflow(%arg0: tensor<10xf32>, %arg1: tensor<10xf32>) -
// -----
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = transform.structured.match ops{["func.call"]} in %arg1
- transform.structured.split %0 after %1 { dimension = 0 }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = transform.structured.match ops{["func.call"]} in %arg1
+ transform.structured.split %0 after %1 { dimension = 0 }
}
func.func private @get_size() -> index
@@ -125,14 +119,11 @@ func.func @dynamic(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100
// -----
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1:2 = transform.structured.split %0 after 4 { dimension = 0}
- %2:2 = transform.structured.split %1#1 after 16 { dimension = 1 }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1:2 = transform.structured.split %0 after 4 { dimension = 0}
+ %2:2 = transform.structured.split %1#1 after 16 { dimension = 1 }
}
func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
@@ -193,15 +184,12 @@ transform.sequence failures(propagate) {
// -----
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = transform.structured.match ops{["func.call"]} in %arg1
- // expected-error @below {{expected dynamic split point handle to point to a single-result index-typed op}}
- transform.structured.split %0 after %1 { dimension = 0 }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = transform.structured.match ops{["func.call"]} in %arg1
+ // expected-error @below {{expected dynamic split point handle to point to a single-result index-typed op}}
+ transform.structured.split %0 after %1 { dimension = 0 }
}
func.func private @get_size() -> i64
@@ -222,15 +210,12 @@ func.func @dynamic(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100
// -----
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = transform.structured.match ops{["func.call"]} in %arg1
- // expected-error @below {{expected the dynamic split point handle to point to as many operations (0) as the target handle (1)}}
- transform.structured.split %0 after %1 { dimension = 0 }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = transform.structured.match ops{["func.call"]} in %arg1
+ // expected-error @below {{expected the dynamic split point handle to point to as many operations (0) as the target handle (1)}}
+ transform.structured.split %0 after %1 { dimension = 0 }
}
func.func private @get_size() -> i64
@@ -249,21 +234,11 @@ func.func @dynamic(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100
// -----
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- pdl.pattern @func_return : benefit(1) {
- %0 = pdl.operands
- %1 = pdl.types
- %2 = pdl.operation "func.return"(%0 : !pdl.range<value>) -> (%1 : !pdl.range<type>)
- pdl.rewrite %2 with "transform.dialect"
- }
-
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["func.return"]} in %arg1
- // expected-error @below {{only applies to structured ops}}
- transform.structured.split %0 after 16 { dimension = 1 }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["func.return"]} in %arg1
+ // expected-error @below {{only applies to structured ops}}
+ transform.structured.split %0 after 16 { dimension = 1 }
}
func.func @noop(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100xf32> {
@@ -273,21 +248,11 @@ func.func @noop(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100xf3
// -----
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- pdl.pattern @linalg_generic : benefit(1) {
- %0 = pdl.operands
- %1 = pdl.types
- %2 = pdl.operation "linalg.generic"(%0 : !pdl.range<value>) -> (%1 : !pdl.range<type>)
- pdl.rewrite %2 with "transform.dialect"
- }
-
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- // expected-error @below {{dimension 1 does not exist in target op}}
- transform.structured.split %0 after 16 { dimension = 1 }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ // expected-error @below {{dimension 1 does not exist in target op}}
+ transform.structured.split %0 after 16 { dimension = 1 }
}
func.func @one_d_static(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100xf32> {
diff --git a/mlir/test/Dialect/Linalg/transform-op-tile.mlir b/mlir/test/Dialect/Linalg/transform-op-tile.mlir
index 8bcfd9d0fa2e7..46027dce04e00 100644
--- a/mlir/test/Dialect/Linalg/transform-op-tile.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-tile.mlir
@@ -1,12 +1,9 @@
// RUN: mlir-opt --test-transform-dialect-interpreter --split-input-file %s | FileCheck %s
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1, %loops:3 = transform.structured.tile %0 [4, 4, 4]
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1, %loops:3 = transform.structured.tile %0 [4, 4, 4]
}
// CHECK-LABEL: func @tile_linalg_matmul(
@@ -39,14 +36,11 @@ func.func @tile_linalg_matmul(
// -----
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = transform.structured.match ops{["func.call"]} in %arg1
- %2, %loops:3 = transform.structured.tile %0 [%1, %1, 4]
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = transform.structured.match ops{["func.call"]} in %arg1
+ %2, %loops:3 = transform.structured.tile %0 [%1, %1, 4]
}
func.func private @get_dynamic_tile_size() -> index
diff --git a/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir b/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
index eb7dc6de6d4a6..321573496ebcf 100644
--- a/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
@@ -16,22 +16,11 @@ func.func @vectorize_matmul(%arg0: tensor<24x12xf32>,
func.return %0 : tensor<24x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- pdl.pattern @pdl_target : benefit(1) {
- %args = operands
- %results = types
- %0 = pdl.operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
- // TODO: we don't want this, but it is the required terminator for pdl.pattern
- rewrite %0 with "transform.dialect"
- }
-
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -73,14 +62,11 @@ func.func @vectorize_keep_pad(
return %9 : tensor<24x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -124,14 +110,11 @@ func.func @vectorize_pad(
return %9 : tensor<24x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 {vectorize_padding}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 {vectorize_padding}
}
// -----
@@ -144,12 +127,9 @@ func.func @vectorize(%arg0: tensor<24x12xf32>,
func.return %0 : tensor<24x25xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- // expected-error @below {{op requires isolated-from-above targets}}
- %2 = transform.structured.vectorize %0
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ // expected-error @below {{op requires isolated-from-above targets}}
+ %2 = transform.structured.vectorize %0
}
diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
index 7cac837cadf3f..b66d8f604127d 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -142,13 +142,10 @@ func.func @permute_generic(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- transform.structured.interchange %0 { iterator_interchange = [1, 2, 0]}
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ transform.structured.interchange %0 { iterator_interchange = [1, 2, 0]}
}
// CHECK-LABEL: func @permute_generic
diff --git a/mlir/test/Dialect/Linalg/transform-promotion.mlir b/mlir/test/Dialect/Linalg/transform-promotion.mlir
index 5493f0c79b929..cc82ed644bb37 100644
--- a/mlir/test/Dialect/Linalg/transform-promotion.mlir
+++ b/mlir/test/Dialect/Linalg/transform-promotion.mlir
@@ -58,13 +58,10 @@ func.func @promote_subview_matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset:
// CHECK-SAME: ins(%[[v0]], %[[v1]] : memref<?x?xf32>, memref<?x?xf32>)
// CHECK-SAME: outs(%[[v2]] : memref<?x?xf32>)
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = transform.structured.promote %0 { operands_to_promote = [0, 1, 2], use_full_tiles_by_default }
- }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = transform.structured.promote %0 { operands_to_promote = [0, 1, 2], use_full_tiles_by_default }
}
// -----
diff --git a/mlir/test/Dialect/Linalg/transform-tile-and-fuse.mlir b/mlir/test/Dialect/Linalg/transform-tile-and-fuse.mlir
index b8af9def1f9ae..d95abcbbfcc79 100644
--- a/mlir/test/Dialect/Linalg/transform-tile-and-fuse.mlir
+++ b/mlir/test/Dialect/Linalg/transform-tile-and-fuse.mlir
@@ -40,20 +40,17 @@ module {
return %7 : tensor<?x?xf32>
}
- transform.with_pdl_patterns {
- ^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- // Find the root and all producers.
- %root = transform.structured.match attributes{"__root__"} in %arg1
- %producers = transform.structured.match attributes{"__producer__"} in %arg1
+ transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+ // Find the root and all producers.
+ %root = transform.structured.match attributes{"__root__"} in %arg1
+ %producers = transform.structured.match attributes{"__producer__"} in %arg1
- // Tile the root.
- %foreach_thread_op, %tiled_op = transform.structured.tile_to_foreach_thread_op %root num_threads [10, 20]
+ // Tile the root.
+ %foreach_thread_op, %tiled_op = transform.structured.tile_to_foreach_thread_op %root num_threads [10, 20]
- // Fuse all producers.
- transform.structured.fuse_into_containing_op %producers into %foreach_thread_op
- }
+ // Fuse all producers.
+ transform.structured.fuse_into_containing_op %producers into %foreach_thread_op
}
}
@@ -100,20 +97,17 @@ module {
return %7 : tensor<?x?xf32>
}
- transform.with_pdl_patterns {
- ^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- // Find the root and all producers.
- %root = transform.structured.match attributes{"__root__"} in %arg1
- %producers = transform.structured.match attributes{"__producer__"} in %arg1
- %reversed_producers = transform.test_reverse_payload_ops %producers
+ transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+ // Find the root and all producers.
+ %root = transform.structured.match attributes{"__root__"} in %arg1
+ %producers = transform.structured.match attributes{"__producer__"} in %arg1
+ %reversed_producers = transform.test_reverse_payload_ops %producers
- // Tile the root.
- %foreach_thread_op, %tiled_op = transform.structured.tile_to_foreach_thread_op %root num_threads [10, 20]
+ // Tile the root.
+ %foreach_thread_op, %tiled_op = transform.structured.tile_to_foreach_thread_op %root num_threads [10, 20]
- // Fuse all producers.
- transform.structured.fuse_into_containing_op %reversed_producers into %foreach_thread_op
- }
+ // Fuse all producers.
+ transform.structured.fuse_into_containing_op %reversed_producers into %foreach_thread_op
}
}
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 9b0dfc3be0ff9..f3735be9a3a5d 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -12,14 +12,11 @@ func.func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memre
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.dot"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.dot"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
}
// -----
@@ -34,14 +31,11 @@ func.func @contraction_matvec(%A: memref<1584x1584xf32>, %B: memref<1584xf32>, %
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
}
// -----
@@ -55,14 +49,11 @@ func.func @contraction_matmul(%A: memref<1584x1584xf32>, %B: memref<1584x1584xf3
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
}
// -----
@@ -77,14 +68,11 @@ func.func @contraction_batch_matmul(%A: memref<1584x1584x1584xf32>, %B: memref<1
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.batch_matmul"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.batch_matmul"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
}
// -----
@@ -120,14 +108,11 @@ func.func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -163,14 +148,11 @@ func.func @generic_output_transpose(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -193,14 +175,11 @@ func.func @generic_interchanged_transpose(%arg0: tensor<12x128x32xf32>) -> tenso
return %1 : tensor<128x12x32xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -236,14 +215,11 @@ func.func @vectorization_test_integer(%A: memref<8x16xi32>, %B: memref<16x32xi32
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -259,14 +235,11 @@ func.func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns }
}
// -----
@@ -286,14 +259,11 @@ func.func @test_vectorize_scalar_input(%A : memref<8x16xf32>, %arg0 : f32) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -313,14 +283,11 @@ func.func @test_do_not_vectorize_unsupported_element_types(%A : memref<8x16xcomp
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -333,14 +300,11 @@ func.func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -354,14 +318,11 @@ func.func @test_vectorize_fill_scalar(%A : memref<f32>, %arg0 : f32) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -374,14 +335,11 @@ func.func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["memref.copy"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["memref.copy"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -397,14 +355,11 @@ func.func @test_vectorize_copy_scalar(%A : memref<f32>, %B : memref<f32>) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["memref.copy"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["memref.copy"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -427,14 +382,11 @@ func.func @test_vectorize_trailing_index(%arg0: memref<1x2x4x8xindex>) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -459,14 +411,11 @@ func.func @test_vectorize_inner_index(%arg0: memref<1x2x4x8xindex>) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -547,14 +496,11 @@ func.func @generic_vectorize(%arg0: memref<4x256xf32>,
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -641,14 +587,11 @@ func.func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -688,14 +631,11 @@ func.func @generic_vectorize_broadcast_transpose(
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -734,14 +674,11 @@ func.func @vectorization_transpose(%A: memref<14x7xf32>, %B: memref<16x14xf32>,
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -769,14 +706,11 @@ func.func @matmul_tensors(
return %0 : tensor<8x12xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -801,14 +735,11 @@ func.func @pad_static(%arg0: tensor<2x?x2xf32>, %pad_value: f32) -> tensor<2x3x4
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { vectorize_padding }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { vectorize_padding }
}
// -----
@@ -833,14 +764,11 @@ func.func @pad_static_source(%arg0: tensor<2x5x2xf32>, %pad_value: f32) -> tenso
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { vectorize_padding }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { vectorize_padding }
}
@@ -873,14 +801,11 @@ func.func @pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: in
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { vectorize_padding }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { vectorize_padding }
}
@@ -907,14 +832,11 @@ func.func @pad_and_transfer_read(%arg0: tensor<5x6xf32>) -> vector<7x9xf32> {
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { vectorize_padding }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { vectorize_padding }
}
// -----
@@ -943,14 +865,11 @@ func.func @pad_and_transfer_write_static(
return %3 : tensor<5x6xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4 { vectorize_padding }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4 { vectorize_padding }
}
@@ -983,14 +902,11 @@ func.func @pad_and_transfer_write_dynamic_static(
return %3 : tensor<?x6xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4 { vectorize_padding }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4 { vectorize_padding }
}
@@ -1020,14 +936,11 @@ func.func @pad_and_insert_slice_source(
return %r : tensor<12x13xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4 { vectorize_padding }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4 { vectorize_padding }
}
@@ -1051,14 +964,11 @@ func.func @pad_and_insert_slice_dest(
return %r : tensor<1x12x13xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4
}
// -----
@@ -1091,14 +1001,11 @@ func.func @pad_tensor_non_const_pad_value(%arg0: tensor<5x6xf32>) -> tensor<12x1
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4 { vectorize_padding }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["tensor.pad"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4 { vectorize_padding }
}
// -----
@@ -1129,14 +1036,11 @@ func.func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4
}
// -----
@@ -1177,14 +1081,11 @@ func.func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
}
// -----
@@ -1210,14 +1111,11 @@ func.func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4 { vectorize_padding }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4 { vectorize_padding }
}
// -----
@@ -1244,14 +1142,11 @@ func.func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4
}
// -----
@@ -1277,14 +1172,11 @@ func.func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4
}
// -----
@@ -1310,14 +1202,11 @@ func.func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4
}
// -----
@@ -1343,14 +1232,11 @@ func.func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4
}
// -----
@@ -1376,14 +1262,11 @@ func.func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4
}
// -----
@@ -1413,14 +1296,11 @@ func.func @explicit_broadcast(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) ->
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4
}
// -----
@@ -1454,18 +1334,15 @@ func.func @fused_broadcast_red_2d(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
- %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
- %5 = transform.structured.vectorize %4
- }
+ %3 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %4 = get_closest_isolated_parent %3 : (!pdl.operation) -> !pdl.operation
+ %5 = transform.structured.vectorize %4
}
// -----
@@ -1504,14 +1381,11 @@ func.func @reduce_1d(%arg0: tensor<32xf32>) -> tensor<f32> {
return %2 : tensor<f32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
@@ -1538,14 +1412,11 @@ func.func @not_projected_permutation(%arg0: tensor<8x8xf32>) -> tensor<6x6x3x3xf
return %result : tensor<6x6x3x3xf32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1
}
// -----
@@ -1580,12 +1451,9 @@ func.func @mixed_parallel_reduced_results(%arg0 : tensor<2x4x8xf32>,
// CHECK-DAG: vector.transfer_write %[[MUL]], %[[ARG2]]
// CHECK-DAG: vector.transfer_write %[[ADD]], %[[ARG3]]
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- transform.sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
- %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
- %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+ %1 = get_closest_isolated_parent %0 : (!pdl.operation) -> !pdl.operation
+ %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns }
}
diff --git a/mlir/test/Dialect/SCF/transform-ops.mlir b/mlir/test/Dialect/SCF/transform-ops.mlir
index 44b220a1b8120..baca3c811ec0b 100644
--- a/mlir/test/Dialect/SCF/transform-ops.mlir
+++ b/mlir/test/Dialect/SCF/transform-ops.mlir
@@ -15,19 +15,16 @@ func.func @get_parent_for_op(%arg0: index, %arg1: index, %arg2: index) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["arith.addi"]} in %arg1
- // CHECK: = transform.loop.get_parent_for
- %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
- %2 = transform.loop.get_parent_for %0 { num_loops = 2 } : (!pdl.operation) -> !transform.op<"scf.for">
- %3 = transform.loop.get_parent_for %0 { num_loops = 3 } : (!pdl.operation) -> !transform.op<"scf.for">
- transform.test_print_remark_at_operand %1, "third loop" : !transform.op<"scf.for">
- transform.test_print_remark_at_operand %2, "second loop" : !transform.op<"scf.for">
- transform.test_print_remark_at_operand %3, "first loop" : !transform.op<"scf.for">
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["arith.addi"]} in %arg1
+ // CHECK: = transform.loop.get_parent_for
+ %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
+ %2 = transform.loop.get_parent_for %0 { num_loops = 2 } : (!pdl.operation) -> !transform.op<"scf.for">
+ %3 = transform.loop.get_parent_for %0 { num_loops = 3 } : (!pdl.operation) -> !transform.op<"scf.for">
+ transform.test_print_remark_at_operand %1, "third loop" : !transform.op<"scf.for">
+ transform.test_print_remark_at_operand %2, "second loop" : !transform.op<"scf.for">
+ transform.test_print_remark_at_operand %3, "first loop" : !transform.op<"scf.for">
}
// -----
@@ -38,14 +35,11 @@ func.func @get_parent_for_op_no_loop(%arg0: index, %arg1: index) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["arith.addi"]} in %arg1
- // expected-error @below {{could not find an 'scf.for' parent}}
- %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["arith.addi"]} in %arg1
+ // expected-error @below {{could not find an 'scf.for' parent}}
+ %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
}
// -----
@@ -80,15 +74,12 @@ func.func @loop_outline_op(%arg0: index, %arg1: index, %arg2: index) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["arith.addi"]} in %arg1
- %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
- // CHECK: = transform.loop.outline %{{.*}}
- transform.loop.outline %1 {func_name = "foo"} : (!transform.op<"scf.for">) -> !pdl.operation
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["arith.addi"]} in %arg1
+ %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
+ // CHECK: = transform.loop.outline %{{.*}}
+ transform.loop.outline %1 {func_name = "foo"} : (!transform.op<"scf.for">) -> !pdl.operation
}
// -----
@@ -109,14 +100,11 @@ func.func @loop_outline_op_multi_region() {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["scf.while"]} in %arg1
- // expected-error @below {{failed to outline}}
- transform.loop.outline %0 {func_name = "foo"} : (!pdl.operation) -> !pdl.operation
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["scf.while"]} in %arg1
+ // expected-error @below {{failed to outline}}
+ transform.loop.outline %0 {func_name = "foo"} : (!pdl.operation) -> !pdl.operation
}
// -----
@@ -140,14 +128,11 @@ func.func @loop_peel_op() {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["arith.addi"]} in %arg1
- %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
- transform.loop.peel %1 : (!transform.op<"scf.for">) -> !pdl.operation
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["arith.addi"]} in %arg1
+ %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
+ transform.loop.peel %1 : (!transform.op<"scf.for">) -> !pdl.operation
}
// -----
@@ -176,16 +161,13 @@ func.func @loop_pipeline_op(%A: memref<?xf32>, %result: memref<?xf32>) {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["arith.addf"]} in %arg1
- %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
- %2 = transform.loop.pipeline %1 : (!transform.op<"scf.for">) -> !pdl.operation
- // Verify that the returned handle is usable.
- transform.test_print_remark_at_operand %2, "transformed" : !pdl.operation
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["arith.addf"]} in %arg1
+ %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
+ %2 = transform.loop.pipeline %1 : (!transform.op<"scf.for">) -> !pdl.operation
+ // Verify that the returned handle is usable.
+ transform.test_print_remark_at_operand %2, "transformed" : !pdl.operation
}
// -----
@@ -203,13 +185,10 @@ func.func @loop_unroll_op() {
return
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
- sequence %arg0 : !pdl.operation failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["arith.addi"]} in %arg1
- %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
- transform.loop.unroll %1 { factor = 4 } : !transform.op<"scf.for">
- }
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+ %0 = transform.structured.match ops{["arith.addi"]} in %arg1
+ %1 = transform.loop.get_parent_for %0 : (!pdl.operation) -> !transform.op<"scf.for">
+ transform.loop.unroll %1 { factor = 4 } : !transform.op<"scf.for">
}