[Mlir-commits] [mlir] [mlir] use transform-interpreter in test passes (PR #70040)

Oleksandr Alex Zinenko llvmlistbot at llvm.org
Tue Oct 24 06:26:13 PDT 2023


https://github.com/ftynse updated https://github.com/llvm/llvm-project/pull/70040

>From e465fa8ebe7c9cdb51fbbe4dee4a80ae2a598679 Mon Sep 17 00:00:00 2001
From: Alex Zinenko <zinenko at google.com>
Date: Tue, 24 Oct 2023 12:53:47 +0000
Subject: [PATCH 1/2] [mlir] use transform-interpreter in test passes

Update most tests to use the transform-interpreter pass instead of the
test-transform-dialect-interpreter pass. The new "main" interpreter
pass has a named entry point instead of looking up the top-level op with
`PossibleTopLevelOpTrait`, which is arguably a more understandable
interface. The change is mechanical: an unnamed sequence is rewritten
into a named one and the transform IR is wrapped in a module when
necessary.
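
For illustration, the typical rewrite (as seen throughout the diff below)
turns a test payload such as

  transform.sequence failures(propagate) {
  ^bb0(%arg0: !transform.any_op):
    // ... transform ops ...
  }

into

  module attributes {transform.with_named_sequence} {
    transform.named_sequence @__transform_main(
        %arg0: !transform.any_op {transform.readonly}) {
      // ... transform ops ...
      transform.yield
    }
  }

and replaces -test-transform-dialect-interpreter with -transform-interpreter
in the RUN lines.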

Add an option to the transform-interpreter pass to target a tagged
payload op instead of the root anchor op, which is also useful for repro
generation.
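
For instance, the interpreter can be pointed at a tagged function rather
than the whole module (a sketch, assuming the standard mlir-opt pass-option
syntax; the function name and tag value are hypothetical):

  // RUN: mlir-opt %s --transform-interpreter="debug-payload-root-tag=payload"

  // Only this function, not the enclosing module, becomes the payload root.
  func.func @fn_to_transform() attributes {transform.target_tag = "payload"} {
    return
  }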

Only the tests in the transform dialect proper and the examples have not
been updated yet. These will be updated separately after a more careful
consideration of the testing coverage of the transform interpreter logic.
---
 .../Dialect/Transform/Transforms/Passes.td    |   8 +
 .../Transforms/TransformInterpreterUtils.h    |  27 +-
 .../Transform/Transforms/InterpreterPass.cpp  |  49 +-
 .../Transforms/TransformInterpreterUtils.cpp  |  12 +-
 .../FuncToLLVM/func-memref-return.mlir        |  31 +-
 .../Conversion/FuncToLLVM/func-to-llvm.mlir   |  36 +-
 .../Conversion/GPUToNVVM/gpu-to-nvvm-32b.mlir |  56 +-
 .../Conversion/GPUToNVVM/gpu-to-nvvm.mlir     |  72 +-
 .../Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir |  24 +-
 ...nsform-op-simplify-bounded-affine-ops.mlir |  52 +-
 .../Transforms/transform-ops.mlir             |  92 ++-
 .../test/Dialect/GPU/barrier-elimination.mlir |  16 +-
 .../GPU/subgroup-mma-vector-unroll.mlir       |  28 +-
 .../Dialect/GPU/transform-gpu-failing.mlir    | 218 +++---
 mlir/test/Dialect/GPU/transform-gpu.mlir      | 168 +++--
 .../lower-to-llvm-e2e-with-target-tag.mlir    |   8 +-
 mlir/test/Dialect/LLVM/transform-e2e.mlir     |  50 +-
 .../Linalg/convert-conv2d-to-img2col.mlir     | 128 ++--
 .../Linalg/generalize-tensor-pack-tile.mlir   |  32 +-
 .../Linalg/generalize-tensor-unpack-tile.mlir |  20 +-
 mlir/test/Dialect/Linalg/hoisting.mlir        | 226 +++---
 .../Dialect/Linalg/match-ops-interpreter.mlir |  57 +-
 .../Linalg/matmul-shared-memory-padding.mlir  | 288 ++++----
 .../Dialect/Linalg/multisize-tiling-full.mlir |  60 +-
 ...ot-bufferize-empty-tensor-elimination.mlir |  18 +-
 .../Linalg/pad-to-specific-memory-space.mlir  |  54 +-
 mlir/test/Dialect/Linalg/promote.mlir         |  66 +-
 .../Dialect/Linalg/promotion_options.mlir     |  14 +-
 mlir/test/Dialect/Linalg/tile-conv.mlir       |   8 +-
 mlir/test/Dialect/Linalg/tile-indexed.mlir    |  22 +-
 mlir/test/Dialect/Linalg/tile-softmax.mlir    |  36 +-
 mlir/test/Dialect/Linalg/tile-tensors.mlir    |  20 +-
 mlir/test/Dialect/Linalg/tile-to-forall.mlir  | 138 ++--
 .../Dialect/Linalg/transform-lower-pack.mlir  | 202 +++---
 .../transform-op-bufferize-to-allocation.mlir | 172 +++--
 ...compose-masked-vectorize-and-cleanups.mlir |  46 +-
 .../Linalg/transform-op-decompose.mlir        |  16 +-
 .../transform-op-fuse-into-containing.mlir    | 218 +++---
 .../Dialect/Linalg/transform-op-fuse.mlir     |  80 ++-
 .../Linalg/transform-op-generalize.mlir       |  12 +-
 .../transform-op-gpu-map-copy-to-threads.mlir | 342 +++++----
 ...-op-hoist-pad-build-packing-loop-nest.mlir | 152 ++--
 .../Linalg/transform-op-hoist-pad.mlir        | 164 +++--
 .../transform-op-insert-slice-to-copy.mlir    |  84 ++-
 .../Linalg/transform-op-interchange.mlir      |  37 +-
 .../Dialect/Linalg/transform-op-match.mlir    |  98 +--
 .../transform-op-matmul-to-outerproduct.mlir  |  20 +-
 .../Linalg/transform-op-multitile-sizes.mlir  |  62 +-
 .../Dialect/Linalg/transform-op-pack.mlir     | 418 ++++++-----
 .../test/Dialect/Linalg/transform-op-pad.mlir | 160 +++--
 .../Dialect/Linalg/transform-op-replace.mlir  |  54 +-
 ...-rewrite-in-destination-passing-style.mlir | 114 +--
 .../Linalg/transform-op-scalarize.mlir        |  14 +-
 ...ansform-op-split-reduction-by-scaling.mlir |  16 +-
 .../Linalg/transform-op-split-reduction.mlir  |  98 +--
 .../Dialect/Linalg/transform-op-split.mlir    | 122 ++--
 .../Dialect/Linalg/transform-op-tile.mlir     | 110 +--
 .../Linalg/transform-op-vectorize.mlir        |  74 +-
 .../Linalg/transform-pack-greedily.mlir       | 200 +++---
 .../Dialect/Linalg/transform-patterns.mlir    |  72 +-
 .../Dialect/Linalg/transform-promotion.mlir   |  45 +-
 .../Linalg/transform-tile-and-fuse.mlir       |  52 +-
 .../Linalg/transform-tile-reduction.mlir      | 134 ++--
 .../Linalg/vectorization-scalable.mlir        |  42 +-
 .../Linalg/vectorization-with-patterns.mlir   | 676 ++++++++++--------
 mlir/test/Dialect/Linalg/vectorization.mlir   | 156 ++--
 .../vectorize-tensor-extract-masked.mlir      |  70 +-
 .../Linalg/vectorize-tensor-extract.mlir      | 176 +++--
 mlir/test/Dialect/MemRef/alloc-to-alloca.mlir |  34 +-
 .../MemRef/extract-address-computations.mlir  | 174 +++--
 .../Dialect/MemRef/make-loop-independent.mlir |  22 +-
 mlir/test/Dialect/MemRef/transform-ops.mlir   | 148 ++--
 .../test/Dialect/NVGPU/tmaload-transform.mlir |  32 +-
 .../NVGPU/transform-create-async-groups.mlir  |  92 ++-
 .../NVGPU/transform-matmul-to-nvvm.mlir       |  38 +-
 .../NVGPU/transform-pipeline-shared.mlir      |  46 +-
 .../SCF/transform-loop-fuse-sibling.mlir      |  66 +-
 .../Dialect/SCF/transform-op-coalesce.mlir    |  40 +-
 .../SCF/transform-op-forall-to-for.mlir       |  48 +-
 .../SCF/transform-op-take-assumed-branch.mlir |  66 +-
 .../Dialect/SCF/transform-ops-invalid.mlir    |  56 +-
 mlir/test/Dialect/SCF/transform-ops.mlir      | 186 ++---
 .../Dialect/SparseTensor/transform-ops.mlir   |   6 +-
 mlir/test/Dialect/Tensor/fold-empty-op.mlir   |  28 +-
 ...nsor-subset-ops-into-vector-transfers.mlir |  14 +-
 .../Dialect/Tensor/rewrite-as-constant.mlir   |  14 +-
 mlir/test/Dialect/Tensor/tiling.mlir          | 172 +++--
 .../transform-op-make-loop-independent.mlir   |  54 +-
 .../Vector/transform-op-vector-to-llvm.mlir   |  20 +-
 .../test/Dialect/Vector/transform-vector.mlir |  96 +--
 .../vector-broadcast-lowering-transforms.mlir |  18 +-
 .../vector-contract-matvec-transforms.mlir    |  14 +-
 .../vector-contract-to-dot-transforms.mlir    |  18 +-
 ...tract-to-matrix-intrinsics-transforms.mlir |  24 +-
 ...o-outerproduct-transforms-unsupported.mlir |  18 +-
 ...r-contract-to-outerproduct-transforms.mlir |  18 +-
 ...contract-to-parallel-arith-transforms.mlir |  18 +-
 .../vector-mask-lowering-transforms.mlir      |  38 +-
 .../Vector/vector-materialize-mask.mlir       |  14 +-
 .../vector-multi-reduction-lowering.mlir      |  14 +-
 ...vector-multi-reduction-outer-lowering.mlir |  14 +-
 ...ctor-outerproduct-lowering-transforms.mlir |  24 +-
 .../Vector/vector-rewrite-narrow-types.mlir   |  42 +-
 .../vector-scalable-create-mask-lowering.mlir |  18 +-
 ...-shape-cast-lowering-scalable-vectors.mlir |  18 +-
 ...vector-shape-cast-lowering-transforms.mlir |  18 +-
 mlir/unittests/Dialect/Transform/Preload.cpp  |  14 +-
 107 files changed, 4754 insertions(+), 3682 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Transform/Transforms/Passes.td b/mlir/include/mlir/Dialect/Transform/Transforms/Passes.td
index 286f69bc52486cb..94965a67c0c1233 100644
--- a/mlir/include/mlir/Dialect/Transform/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Transform/Transforms/Passes.td
@@ -70,6 +70,14 @@ def InterpreterPass : Pass<"transform-interpreter"> {
   }];
   let dependentDialects = ["::mlir::transform::TransformDialect"];
   let options = [
+    Option<"debugPayloadRootTag", "debug-payload-root-tag", "std::string",
+           /*default=*/[{""}],
+           "Select the operation with 'transform.target_tag' attribute having "
+           "the given value as payload IR root. If empty select the pass "
+           "anchor operation as the payload IR root.">,
+    Option<"disableExpensiveChecks", "disable-expensive-checks", "bool",
+           "false",
+           "Disable expensive checks in the interpreter for a faster run.">,
     Option<"entryPoint", "entry-point", "std::string",
            /*default=*/[{"__transform_main"}],
            "Entry point of the pass pipeline.">,
diff --git a/mlir/include/mlir/Dialect/Transform/Transforms/TransformInterpreterUtils.h b/mlir/include/mlir/Dialect/Transform/Transforms/TransformInterpreterUtils.h
index 3fc02267f26e9da..3828021d9543035 100644
--- a/mlir/include/mlir/Dialect/Transform/Transforms/TransformInterpreterUtils.h
+++ b/mlir/include/mlir/Dialect/Transform/Transforms/TransformInterpreterUtils.h
@@ -85,21 +85,18 @@ LogicalResult mergeSymbolsInto(Operation *target,
                                OwningOpRef<Operation *> other);
 } // namespace detail
 
-/// Standalone util to apply the named sequence `entryPoint` to the payload.
-/// This is done in 3 steps:
-///   1. lookup the `entryPoint` symbol in `{payload, sharedTransformModule}` by
-///   calling detail::findTransformEntryPoint.
-///   2. if the entry point is found and not nested under
-///   `sharedTransformModule`, call `detail::defineDeclaredSymbols` to "link" in
-///   the `sharedTransformModule`. Note: this may modify the transform IR
-///   embedded with the payload IR.
-///   3. apply the transform IR to the payload IR, relaxing the requirement that
-///   the transform IR is a top-level transform op. We are applying a named
-///   sequence anyway.
-LogicalResult applyTransformNamedSequence(
-    Operation *payload, ModuleOp transformModule,
-    const TransformOptions &options,
-    StringRef entryPoint = TransformDialect::kTransformEntryPointSymbolName);
+/// Standalone util to apply the named sequence `transformRoot` to `payload` IR.
+/// This is done in 2 steps:
+///   1. If `transformModule` is provided and is not nested under
+///      `transformRoot`, it will be "linked into" the IR containing
+///      `transformRoot` to resolve undefined named sequences.
+///   2. The transforms specified in `transformRoot` are applied to `payload`,
+///      assuming the named sequence has a single argument handle that will be
+///      associated with `payload` on run.
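+///
+/// A minimal usage sketch (caller-side names assumed):
+///   if (failed(transform::applyTransformNamedSequence(
+///           payloadRoot, transformEntryPoint, transformModule,
+///           transform::TransformOptions())))
+///     return failure();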
+LogicalResult applyTransformNamedSequence(Operation *payload,
+                                          Operation *transformRoot,
+                                          ModuleOp transformModule,
+                                          const TransformOptions &options);
 
 } // namespace transform
 } // namespace mlir
diff --git a/mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp b/mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp
index 3ec51d88729a0e7..c875519945b9218 100644
--- a/mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp
+++ b/mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp
@@ -19,6 +19,39 @@ namespace transform {
 } // namespace transform
 } // namespace mlir
 
+/// Returns the payload operation to be used as the payload root:
+///   - the unique operation nested under `passRoot` that has the given tag
+///     attribute, if the tag is non-empty;
+///   - `passRoot` itself if the tag is empty.
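+///
+/// For example, when `tag` is "payload", the unique operation carrying the
+/// `transform.target_tag = "payload"` attribute is returned; if several
+/// operations carry the tag, an error is emitted and nullptr is returned.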
+static Operation *findPayloadRoot(Operation *passRoot, StringRef tag) {
+  // Fast return.
+  if (tag.empty())
+    return passRoot;
+
+  // Walk to do a lookup.
+  Operation *target = nullptr;
+  auto tagAttrName = StringAttr::get(
+      passRoot->getContext(), transform::TransformDialect::kTargetTagAttrName);
+  WalkResult walkResult = passRoot->walk([&](Operation *op) {
+    auto attr = op->getAttrOfType<StringAttr>(tagAttrName);
+    if (!attr || attr.getValue() != tag)
+      return WalkResult::advance();
+
+    if (!target) {
+      target = op;
+      return WalkResult::advance();
+    }
+
+    InFlightDiagnostic diag = op->emitError()
+                              << "repeated operation with the target tag '"
+                              << tag << "'";
+    diag.attachNote(target->getLoc()) << "previously seen operation";
+    return WalkResult::interrupt();
+  });
+
+  return walkResult.wasInterrupted() ? nullptr : target;
+}
+
 namespace {
 class InterpreterPass
     : public transform::impl::InterpreterPassBase<InterpreterPass> {
@@ -29,10 +62,22 @@ class InterpreterPass
     MLIRContext *context = &getContext();
     ModuleOp transformModule =
         transform::detail::getPreloadedTransformModule(context);
+    Operation *payloadRoot =
+        findPayloadRoot(getOperation(), debugPayloadRootTag);
+    if (!payloadRoot)
+      return signalPassFailure();
+    Operation *transformEntryPoint = transform::detail::findTransformEntryPoint(
+        getOperation(), transformModule, entryPoint);
+    if (!transformEntryPoint) {
+      getOperation()->emitError()
+          << "could not find transform entry point: " << entryPoint
+          << " in either payload or transform module";
+      return signalPassFailure();
+    }
+
     if (failed(transform::applyTransformNamedSequence(
-            getOperation(), transformModule,
-            options.enableExpensiveChecks(true), entryPoint)))
+            payloadRoot, transformEntryPoint, transformModule,
+            options.enableExpensiveChecks(!disableExpensiveChecks)))) {
       return signalPassFailure();
+    }
   }
 
 private:
diff --git a/mlir/lib/Dialect/Transform/Transforms/TransformInterpreterUtils.cpp b/mlir/lib/Dialect/Transform/Transforms/TransformInterpreterUtils.cpp
index e6d692072267c1f..2e32b7f71d10448 100644
--- a/mlir/lib/Dialect/Transform/Transforms/TransformInterpreterUtils.cpp
+++ b/mlir/lib/Dialect/Transform/Transforms/TransformInterpreterUtils.cpp
@@ -409,16 +409,8 @@ transform::detail::mergeSymbolsInto(Operation *target,
 }
 
 LogicalResult transform::applyTransformNamedSequence(
-    Operation *payload, ModuleOp transformModule,
-    const TransformOptions &options, StringRef entryPoint) {
-  Operation *transformRoot =
-      detail::findTransformEntryPoint(payload, transformModule, entryPoint);
-  if (!transformRoot) {
-    return payload->emitError()
-           << "could not find transform entry point: " << entryPoint
-           << " in either payload or transform module";
-  }
-
+    Operation *payload, Operation *transformRoot, ModuleOp transformModule,
+    const TransformOptions &options) {
   // `transformModule` may not be modified.
   if (transformModule && !transformModule->isAncestor(transformRoot)) {
     OwningOpRef<Operation *> clonedTransformModule(transformModule->clone());
diff --git a/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir b/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
index b3883703fcdc1ac..b584d4ce28f52ab 100644
--- a/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
@@ -2,7 +2,7 @@
 
 // RUN: mlir-opt -convert-func-to-llvm='use-bare-ptr-memref-call-conv=1 use-opaque-pointers=1'  %s | FileCheck %s --check-prefix=BAREPTR
 
-// RUN: mlir-opt -test-transform-dialect-interpreter %s | FileCheck %s --check-prefix=BAREPTR
+// RUN: mlir-opt -transform-interpreter %s | FileCheck %s --check-prefix=BAREPTR
 
 // These tests were separated from func-memref.mlir because applying
 // -reconcile-unrealized-casts resulted in `llvm.extractvalue` ops getting
@@ -110,17 +110,20 @@ func.func @unranked_memref(%arg0:memref<*xi32>) {
 }
 func.func private @printMemrefI32(memref<*xi32>)
 
-transform.sequence failures(propagate) {
-^bb1(%toplevel_module: !transform.any_op):
-  %func = transform.structured.match ops{["func.func"]} in %toplevel_module
-    : (!transform.any_op) -> !transform.any_op
-  transform.apply_conversion_patterns to %func {
-    transform.apply_conversion_patterns.func.func_to_llvm
-  } with type_converter {
-    transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
-      {use_bare_ptr_call_conv = true, use_opaque_pointers = true}
-  } {
-    legal_dialects = ["llvm"],
-    partial_conversion
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(
+      %toplevel_module: !transform.any_op {transform.readonly}) {
+    %func = transform.structured.match ops{["func.func"]} in %toplevel_module
+      : (!transform.any_op) -> !transform.any_op
+    transform.apply_conversion_patterns to %func {
+      transform.apply_conversion_patterns.func.func_to_llvm
+    } with type_converter {
+      transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
+        {use_bare_ptr_call_conv = true, use_opaque_pointers = true}
+    } {
+      legal_dialects = ["llvm"],
+      partial_conversion
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
index 819611a59dde5df..8254e77c8628bdf 100644
--- a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
@@ -2,7 +2,7 @@
 
 // RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm{index-bitwidth=32}),convert-func-to-llvm{index-bitwidth=32 use-opaque-pointers=1},reconcile-unrealized-casts)" %s | FileCheck --check-prefix=CHECK32 %s
 
-// RUN: mlir-opt -test-transform-dialect-interpreter %s | FileCheck --check-prefix=CHECK32 %s
+// RUN: mlir-opt -transform-interpreter %s | FileCheck --check-prefix=CHECK32 %s
 
 // Same below, but using the `ConvertToLLVMPatternInterface` entry point
 // and the generic `convert-to-llvm` pass.
@@ -537,20 +537,22 @@ func.func @switchi8(%arg0 : i8) -> i32 {
 // CHECK-NEXT:     llvm.return %[[E1]] : i32
 // CHECK-NEXT:   }
 
-transform.sequence failures(propagate) {
-^bb1(%toplevel_module: !transform.any_op):
-  %func = transform.structured.match ops{["func.func"]} in %toplevel_module
-    : (!transform.any_op) -> !transform.any_op
-  transform.apply_conversion_patterns to %func {
-    transform.apply_conversion_patterns.dialect_to_llvm "math"
-    transform.apply_conversion_patterns.dialect_to_llvm "arith"
-    transform.apply_conversion_patterns.dialect_to_llvm "cf"
-    transform.apply_conversion_patterns.func.func_to_llvm
-  } with type_converter {
-    transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
-      {index_bitwidth = 32, use_opaque_pointers = true}
-  } {
-    legal_dialects = ["llvm"], 
-    partial_conversion
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%toplevel_module: !transform.any_op {transform.readonly}) {
+    %func = transform.structured.match ops{["func.func"]} in %toplevel_module
+      : (!transform.any_op) -> !transform.any_op
+    transform.apply_conversion_patterns to %func {
+      transform.apply_conversion_patterns.dialect_to_llvm "math"
+      transform.apply_conversion_patterns.dialect_to_llvm "arith"
+      transform.apply_conversion_patterns.dialect_to_llvm "cf"
+      transform.apply_conversion_patterns.func.func_to_llvm
+    } with type_converter {
+      transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
+        {index_bitwidth = 32, use_opaque_pointers = true}
+    } {
+      legal_dialects = ["llvm"], 
+      partial_conversion
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm-32b.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm-32b.mlir
index 62f61629ea9d037..9bdce31261064e5 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm-32b.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm-32b.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -convert-gpu-to-nvvm='index-bitwidth=32 use-opaque-pointers=1' -split-input-file | FileCheck %s
 
-// RUN: mlir-opt %s -test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter | FileCheck %s
 
 gpu.module @test_module_0 {
   // CHECK-LABEL: func @gpu_index_ops()
@@ -48,30 +48,32 @@ gpu.module @test_module_1 {
   }
 }
 
-transform.sequence failures(propagate) {
-^bb1(%toplevel_module: !transform.any_op):
-  %gpu_module = transform.structured.match ops{["gpu.module"]} in %toplevel_module
-    : (!transform.any_op) -> !transform.any_op
-  transform.apply_conversion_patterns to %gpu_module {
-    transform.apply_conversion_patterns.dialect_to_llvm "arith"
-    transform.apply_conversion_patterns.dialect_to_llvm "cf"
-    transform.apply_conversion_patterns.vector.vector_to_llvm
-    transform.apply_conversion_patterns.func.func_to_llvm
-    transform.apply_conversion_patterns.dialect_to_llvm "memref"
-    transform.apply_conversion_patterns.gpu.gpu_to_nvvm
-    transform.apply_conversion_patterns.gpu.gpu_wmma_to_nvvm
-    transform.apply_conversion_patterns.gpu.gpu_subgroup_reduce_to_nvvm {has_redux = true}
-    transform.apply_conversion_patterns.nvgpu.nvgpu_to_nvvm
-  } with type_converter {
-    transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
-      {index_bitwidth = 32, use_opaque_pointers = true}
-  } {
-    legal_dialects = ["llvm", "memref", "nvvm"],
-    legal_ops = ["func.func", "gpu.module", "gpu.module_end", "gpu.yield"],
-    illegal_dialects = ["gpu"],
-    illegal_ops = ["llvm.cos", "llvm.exp", "llvm.exp2", "llvm.fabs", "llvm.fceil",
-                   "llvm.ffloor", "llvm.log", "llvm.log10", "llvm.log2", "llvm.pow",
-                   "llvm.sin", "llvm.sqrt"],
-    partial_conversion
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%toplevel_module: !transform.any_op {transform.readonly}) {
+    %gpu_module = transform.structured.match ops{["gpu.module"]} in %toplevel_module
+      : (!transform.any_op) -> !transform.any_op
+    transform.apply_conversion_patterns to %gpu_module {
+      transform.apply_conversion_patterns.dialect_to_llvm "arith"
+      transform.apply_conversion_patterns.dialect_to_llvm "cf"
+      transform.apply_conversion_patterns.vector.vector_to_llvm
+      transform.apply_conversion_patterns.func.func_to_llvm
+      transform.apply_conversion_patterns.dialect_to_llvm "memref"
+      transform.apply_conversion_patterns.gpu.gpu_to_nvvm
+      transform.apply_conversion_patterns.gpu.gpu_wmma_to_nvvm
+      transform.apply_conversion_patterns.gpu.gpu_subgroup_reduce_to_nvvm {has_redux = true}
+      transform.apply_conversion_patterns.nvgpu.nvgpu_to_nvvm
+    } with type_converter {
+      transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
+        {index_bitwidth = 32, use_opaque_pointers = true}
+    } {
+      legal_dialects = ["llvm", "memref", "nvvm"],
+      legal_ops = ["func.func", "gpu.module", "gpu.module_end", "gpu.yield"],
+      illegal_dialects = ["gpu"],
+      illegal_ops = ["llvm.cos", "llvm.exp", "llvm.exp2", "llvm.fabs", "llvm.fceil",
+                    "llvm.ffloor", "llvm.log", "llvm.log10", "llvm.log2", "llvm.pow",
+                    "llvm.sin", "llvm.sqrt"],
+      partial_conversion
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index a8c02e32ef92b6b..92d85a473d2318f 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s -convert-gpu-to-nvvm='has-redux=1 use-opaque-pointers=1' -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter | FileCheck %s
 
 gpu.module @test_module_0 {
   // CHECK-LABEL: func @gpu_index_ops()
@@ -627,38 +627,40 @@ gpu.module @test_module_31 {
   }
 }
 
-transform.sequence failures(propagate) {
-^bb1(%toplevel_module: !transform.any_op):
-  %gpu_module = transform.structured.match ops{["gpu.module"]} in %toplevel_module
-    : (!transform.any_op) -> !transform.any_op
-
-  transform.apply_patterns to %gpu_module {
-    transform.apply_patterns.gpu.gpu_rewrite_patterns
-  } : !transform.any_op
-
-  transform.apply_conversion_patterns to %gpu_module {
-    transform.apply_conversion_patterns.dialect_to_llvm "arith"
-    transform.apply_conversion_patterns.dialect_to_llvm "cf"
-    transform.apply_conversion_patterns.vector.vector_to_llvm
-    transform.apply_conversion_patterns.func.func_to_llvm
-    transform.apply_conversion_patterns.dialect_to_llvm "memref"
-    transform.apply_conversion_patterns.gpu.gpu_to_nvvm
-    transform.apply_conversion_patterns.gpu.gpu_wmma_to_nvvm
-    transform.apply_conversion_patterns.gpu.gpu_subgroup_reduce_to_nvvm
-    transform.apply_conversion_patterns.nvgpu.nvgpu_to_nvvm
-  } with type_converter {
-    transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
-      {index_bitwidth = 64,
-       use_bare_ptr = true,
-       use_bare_ptr_memref_call_conv = true,
-       use_opaque_pointers = true}
-  } {
-    legal_dialects = ["llvm", "memref", "nvvm", "test"],
-    legal_ops = ["func.func", "gpu.module", "gpu.module_end", "gpu.yield"],
-    illegal_dialects = ["gpu"],
-    illegal_ops = ["llvm.cos", "llvm.exp", "llvm.exp2", "llvm.fabs", "llvm.fceil",
-                   "llvm.ffloor", "llvm.log", "llvm.log10", "llvm.log2","llvm.pow",
-                   "llvm.sin", "llvm.sqrt"],
-    partial_conversion
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%toplevel_module: !transform.any_op {transform.readonly}) {
+    %gpu_module = transform.structured.match ops{["gpu.module"]} in %toplevel_module
+      : (!transform.any_op) -> !transform.any_op
+
+    transform.apply_patterns to %gpu_module {
+      transform.apply_patterns.gpu.gpu_rewrite_patterns
+    } : !transform.any_op
+
+    transform.apply_conversion_patterns to %gpu_module {
+      transform.apply_conversion_patterns.dialect_to_llvm "arith"
+      transform.apply_conversion_patterns.dialect_to_llvm "cf"
+      transform.apply_conversion_patterns.vector.vector_to_llvm
+      transform.apply_conversion_patterns.func.func_to_llvm
+      transform.apply_conversion_patterns.dialect_to_llvm "memref"
+      transform.apply_conversion_patterns.gpu.gpu_to_nvvm
+      transform.apply_conversion_patterns.gpu.gpu_wmma_to_nvvm
+      transform.apply_conversion_patterns.gpu.gpu_subgroup_reduce_to_nvvm
+      transform.apply_conversion_patterns.nvgpu.nvgpu_to_nvvm
+    } with type_converter {
+      transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
+        {index_bitwidth = 64,
+        use_bare_ptr = true,
+        use_bare_ptr_memref_call_conv = true,
+        use_opaque_pointers = true}
+    } {
+      legal_dialects = ["llvm", "memref", "nvvm", "test"],
+      legal_ops = ["func.func", "gpu.module", "gpu.module_end", "gpu.yield"],
+      illegal_dialects = ["gpu"],
+      illegal_ops = ["llvm.cos", "llvm.exp", "llvm.exp2", "llvm.fabs", "llvm.fceil",
+                    "llvm.ffloor", "llvm.log", "llvm.log10", "llvm.log2","llvm.pow",
+                    "llvm.sin", "llvm.sqrt"],
+      partial_conversion
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
index c7d28e7443695fc..123a661193c4901 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s -convert-nvgpu-to-nvvm='use-opaque-pointers=1' | FileCheck %s
-// RUN: mlir-opt %s -test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter | FileCheck %s
 
 // CHECK-LABEL: @m16n8k16_fp16
 func.func @m16n8k16_fp16(%arg0: vector<4x2xf16>, %arg1: vector<2x2xf16>, %arg2: vector<2x2xf16>) -> vector<2x2xf16> {
@@ -1113,14 +1113,16 @@ func.func @warpgroup_matrix_multiply_m128n128k64(
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.apply_conversion_patterns to %0 {
-    transform.apply_conversion_patterns.nvgpu.nvgpu_to_nvvm
-  } with type_converter {
-    transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
-      {use_opaque_pointers = true}
-  } {legal_dialects = ["arith", "func", "llvm", "memref", "nvvm", "vector", "scf"], partial_conversion} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 
+      : (!transform.any_op) -> !transform.any_op
+    transform.apply_conversion_patterns to %0 {
+      transform.apply_conversion_patterns.nvgpu.nvgpu_to_nvvm
+    } with type_converter {
+      transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
+        {use_opaque_pointers = true}
+    } {legal_dialects = ["arith", "func", "llvm", "memref", "nvvm", "vector", "scf"], partial_conversion} : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Affine/transform-op-simplify-bounded-affine-ops.mlir b/mlir/test/Dialect/Affine/transform-op-simplify-bounded-affine-ops.mlir
index a30607b07ebeeb7..d8e77f6e5538e12 100644
--- a/mlir/test/Dialect/Affine/transform-op-simplify-bounded-affine-ops.mlir
+++ b/mlir/test/Dialect/Affine/transform-op-simplify-bounded-affine-ops.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt  %s -allow-unregistered-dialect \
-// RUN:     --test-transform-dialect-interpreter -verify-diagnostics \
+// RUN:     --transform-interpreter -verify-diagnostics \
 // RUN:     --split-input-file | FileCheck %s
 
 //     CHECK: func @simplify_min_max()
@@ -13,11 +13,13 @@ func.func @simplify_min_max() -> (index, index) {
   return %1, %2 : index, index
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["affine.min", "affine.max"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.match ops{["test.some_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.affine.simplify_bounded_affine_ops %0 with [%1 : !transform.any_op] within [0] and [20] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["affine.min", "affine.max"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.match ops{["test.some_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.affine.simplify_bounded_affine_ops %0 with [%1 : !transform.any_op] within [0] and [20] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -33,28 +35,34 @@ func.func @simplify_min_sequence() -> index {
   return %4 : index
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["affine.min"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.match ops{["test.workgroup_id"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.match ops{["test.thread_id"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.affine.simplify_bounded_affine_ops %0 with [%1, %2 : !transform.any_op, !transform.any_op] within [0, 0] and [31, 31] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["affine.min"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.match ops{["test.workgroup_id"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.match ops{["test.thread_id"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.affine.simplify_bounded_affine_ops %0 with [%1, %2 : !transform.any_op, !transform.any_op] within [0, 0] and [31, 31] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["affine.min"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error at +1 {{incorrect number of lower bounds, expected 0 but found 1}}
-  transform.affine.simplify_bounded_affine_ops %0 with [] within [0] and [] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["affine.min"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error at +1 {{incorrect number of lower bounds, expected 0 but found 1}}
+    transform.affine.simplify_bounded_affine_ops %0 with [] within [0] and [] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["affine.min"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error at +1 {{incorrect number of upper bounds, expected 0 but found 1}}
-  transform.affine.simplify_bounded_affine_ops %0 with [] within [] and [5] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["affine.min"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error at +1 {{incorrect number of upper bounds, expected 0 but found 1}}
+    transform.affine.simplify_bounded_affine_ops %0 with [] within [] and [5] : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
index 9b6d8ccfd6587d2..8d52d9900a793e2 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
@@ -1,11 +1,13 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter %s -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt --transform-interpreter %s -split-input-file -verify-diagnostics | FileCheck %s
 
 // Test One-Shot Bufferize.
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.bufferization.one_shot_bufferize %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.bufferization.one_shot_bufferize %0 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @test_function(
@@ -29,10 +31,12 @@ func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf3
 
 // Emit linalg.copy instead of memref.copy.
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.bufferization.one_shot_bufferize %0 {memcpy_op = "linalg.copy"} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.bufferization.one_shot_bufferize %0 {memcpy_op = "linalg.copy"} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @test_function(
@@ -57,11 +61,13 @@ func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf3
 
 // Test analysis of One-Shot Bufferize only.
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.bufferization.one_shot_bufferize %0
-      {test_analysis_only = true} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.bufferization.one_shot_bufferize %0
+        {test_analysis_only = true} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @test_function_analysis(
@@ -80,11 +86,13 @@ func.func @test_function_analysis(%A : tensor<?xf32>, %v : vector<4xf32>) -> (te
 // Test One-Shot Bufferize transform failure with an unknown op. This would be
 // allowed with `allow_unknown_ops`.
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @+1 {{bufferization failed}}
-  %1 = transform.bufferization.one_shot_bufferize %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @+1 {{bufferization failed}}
+    %1 = transform.bufferization.one_shot_bufferize %0 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
@@ -95,10 +103,12 @@ func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  // %arg1 is the module
-  %0 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    // %arg1 is the module
+    %0 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 module {
@@ -124,10 +134,12 @@ module {
 
 // Test we use identity layout at function boundaries.
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-  %0 = transform.bufferization.one_shot_bufferize layout{IdentityLayoutMap} %arg1
-    { bufferize_function_boundaries = true } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %0 = transform.bufferization.one_shot_bufferize layout{IdentityLayoutMap} %arg1
+      { bufferize_function_boundaries = true } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK: func.func @matmul(
@@ -143,11 +155,13 @@ func.func @matmul(%A: tensor<12x9xf32>, %B: tensor<9x6xf32>, %C: tensor<12x6xf32
 
 // -----
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["tensor.empty"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1 = transform.cast %0 : !transform.any_op to !transform.op<"tensor.empty">
     transform.bufferization.empty_tensor_to_alloc_tensor %1 : (!transform.op<"tensor.empty">) -> !transform.op<"bufferization.alloc_tensor">
+    transform.yield
+  }
 }
 
 // Expect `bufferization.empty_tensor_to_alloc_tensor` to replace the tensor.empty.
@@ -159,10 +173,12 @@ func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.bufferization.eliminate_empty_tensors %0 : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.bufferization.eliminate_empty_tensors %0 : !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @empty_tensor_elimination(
@@ -180,10 +196,12 @@ func.func @empty_tensor_elimination(
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.bufferization.buffer_loop_hoisting %0 : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.bufferization.buffer_loop_hoisting %0 : !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @buffer_loop_hoisting(
diff --git a/mlir/test/Dialect/GPU/barrier-elimination.mlir b/mlir/test/Dialect/GPU/barrier-elimination.mlir
index d3846f0b31e18cb..03f162a92390bb0 100644
--- a/mlir/test/Dialect/GPU/barrier-elimination.mlir
+++ b/mlir/test/Dialect/GPU/barrier-elimination.mlir
@@ -1,11 +1,13 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter | FileCheck %s
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.gpu.eliminate_barriers
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.gpu.eliminate_barriers
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: @read_read_write
diff --git a/mlir/test/Dialect/GPU/subgroup-mma-vector-unroll.mlir b/mlir/test/Dialect/GPU/subgroup-mma-vector-unroll.mlir
index dded4993f467c01..03aba89c11afc0a 100644
--- a/mlir/test/Dialect/GPU/subgroup-mma-vector-unroll.mlir
+++ b/mlir/test/Dialect/GPU/subgroup-mma-vector-unroll.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 func.func @matmul(%lhs: memref<32x32xf32>, %rhs: memref<32x32xf32>, %out: memref<32x32xf32>) {
   %c8 = arith.constant 8 : index
@@ -33,11 +33,14 @@ func.func @matmul(%lhs: memref<32x32xf32>, %rhs: memref<32x32xf32>, %out: memref
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.gpu.unroll_vectors_subgroup_mma [16, 16, 8]
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %func_op = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.op<"func.func">
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.gpu.unroll_vectors_subgroup_mma [16, 16, 8]
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -90,9 +93,12 @@ func.func @gathered_matmul(%lhs: memref<32x32xf32>, %rhs: memref<32x32xf32>, %ou
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.gpu.unroll_vectors_subgroup_mma [8, 16, 4]
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %func_op = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.op<"func.func">
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.gpu.unroll_vectors_subgroup_mma [8, 16, 4]
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/GPU/transform-gpu-failing.mlir b/mlir/test/Dialect/GPU/transform-gpu-failing.mlir
index 51164e0a22f21dd..f81f8b64afdfc65 100644
--- a/mlir/test/Dialect/GPU/transform-gpu-failing.mlir
+++ b/mlir/test/Dialect/GPU/transform-gpu-failing.mlir
@@ -1,14 +1,16 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter --split-input-file  -canonicalize -cse --verify-diagnostics %s
+// RUN: mlir-opt --transform-interpreter --split-input-file  -canonicalize -cse --verify-diagnostics %s
 
 func.func @map_nested_forall_to_threads_not_gpu_launch() -> () {
   %1 = tensor.empty() : tensor<4xf32>
   return
 }
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["tensor.empty"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{Given target is not a gpu.launch}}
-  %1 = transform.gpu.map_nested_forall_to_threads %funcop block_dims = [1, 1, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["tensor.empty"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{Given target is not a gpu.launch}}
+    %1 = transform.gpu.map_nested_forall_to_threads %funcop block_dims = [1, 1, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -44,12 +46,15 @@ func.func @map_nested_forall_to_threads_excessive_threads(%x: memref<2 x 32 x f3
 
   return %y : memref<2 x 32 x f32>
 }
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{Trying to launch a GPU kernel with grid_dims = (1, 1, 1) block_dims = (1200, 9, 1). It is larger than the limits.}}
-  // expected-note @below {{"block_dims" is too large}}
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [1200, 9, 1] : (!transform.any_op) -> !transform.any_op
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{Trying to launch a GPU kernel with grid_dims = (1, 1, 1) block_dims = (1200, 9, 1). It is larger than the limits.}}
+    // expected-note @below {{"block_dims" is too large}}
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [1200, 9, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -86,11 +91,13 @@ func.func @map_nested_forall_to_threads_fewer_threads(%x: memref<2 x 32 x f32>,
   return %y : memref<2 x 32 x f32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{the number of required parallel resources (blocks or threads) 6300 overflows the number of available resources 512}}
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [128, 4, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{the number of required parallel resources (blocks or threads) 6300 overflows the number of available resources 512}}
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [128, 4, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -112,11 +119,13 @@ func.func @map_nested_forall_to_threads_dynamic_trip_count(%x: memref<2 x 32 x f
   return %y : memref<2 x 32 x f32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{requires statically sized, normalized forall op}}
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [128, 4, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{requires statically sized, normalized forall op}}
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [128, 4, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -132,14 +141,16 @@ func.func @map_nested_forall_to_threads_not_buffer(%x: tensor<32x32xf32>, %y: te
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  %forall, %tiled = transform.structured.tile_using_forall %matmul num_threads [2, 3, 1] (mapping = [ #gpu.thread<y>, #gpu.thread<x>, #gpu.thread<z> ] )
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{only bufferized scf.forall can be mapped}}
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [96, 4, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    %forall, %tiled = transform.structured.tile_using_forall %matmul num_threads [2, 3, 1] (mapping = [ #gpu.thread<y>, #gpu.thread<x>, #gpu.thread<z> ] )
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{only bufferized scf.forall can be mapped}}
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [96, 4, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -150,11 +161,14 @@ func.func @map_forall_to_blocks_not_gpu_launch() -> () {
   %1 = tensor.empty() : tensor<4xf32>
   return
 }
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["tensor.empty"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{Given target is not gpu.launch}}
-  %1 = transform.gpu.map_forall_to_blocks %funcop : (!transform.any_op) -> !transform.any_op
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["tensor.empty"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{Given target is not gpu.launch}}
+    %1 = transform.gpu.map_forall_to_blocks %funcop : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -187,11 +201,13 @@ func.func @map_forall_to_blocks_not_unique(%x: memref<2 x 32 x f32>, %y: memref<
   return %y : memref<2 x 32 x f32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{could not find a unique topLevel scf.forall}}
-  %1 = transform.gpu.map_forall_to_blocks %funcop : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{could not find a unique topLevel scf.forall}}
+    %1 = transform.gpu.map_forall_to_blocks %funcop : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -220,11 +236,13 @@ func.func @map_forall_to_blocks_large_loop(%x: memref<2 x 32 x f32>, %y: memref<
   return %y : memref<2 x 32 x f32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{could not find a unique topLevel scf.forall}}
-  %1 = transform.gpu.map_forall_to_blocks %funcop { generate_gpu_launch } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{could not find a unique topLevel scf.forall}}
+    %1 = transform.gpu.map_forall_to_blocks %funcop { generate_gpu_launch } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -241,11 +259,13 @@ func.func @map_forall_to_blocks_large_loop(%x: memref<2 x 32 x f32>, %y: memref<
   return %y : memref<2 x 32 x f32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{Trying to launch a GPU kernel with grid_dims = (65535, 65535, 1) block_dims = (1, 1, 1). It is larger than the limits.}}
-  %1 = transform.gpu.map_forall_to_blocks %funcop generate_gpu_launch : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{Trying to launch a GPU kernel with grid_dims = (65535, 65535, 1) block_dims = (1, 1, 1). It is larger than the limits.}}
+    %1 = transform.gpu.map_forall_to_blocks %funcop generate_gpu_launch : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -268,11 +288,13 @@ func.func @saxpy2d_singleloop(%x: !type, %y: !type, %stream : !gpu.async.token)
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{cannot mix different mapping types, use nesting}}
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 32, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{cannot mix different mapping types, use nesting}}
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 32, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -295,11 +317,13 @@ func.func @saxpy2d_singleloop(%x: !type, %y: !type, %stream : !gpu.async.token)
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{duplicate attribute, cannot map different loops to the same mapping id}}
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 32, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{duplicate attribute, cannot map different loops to the same mapping id}}
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 32, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -322,23 +346,25 @@ func.func @saxpy2d_singleloop(%x: !type, %y: !type, %stream : !gpu.async.token)
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{cannot mix linear and non-linear mapping modes}}
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 32, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{cannot mix linear and non-linear mapping modes}}
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 32, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
 
 // expected-note @below {{when applied to this payload op}}
-module {
-transform.sequence failures(propagate) {
-^bb1(%op: !transform.any_op):
-  // expected-error @below {{could not find a unique topLevel scf.forall}}
-  %gpu_launch = transform.gpu.map_forall_to_blocks %op generate_gpu_launch grid_dims = [1, 1, 1]
-    : (!transform.any_op) -> !transform.any_op
-}
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%op: !transform.any_op {transform.consumed}) {
+    // expected-error @below {{could not find a unique topLevel scf.forall}}
+    %gpu_launch = transform.gpu.map_forall_to_blocks %op generate_gpu_launch grid_dims = [1, 1, 1]
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -350,11 +376,13 @@ func.func public @improperly_sized_grid_dims(%arg0: memref<32x32xf32>, %arg1: me
   return
 }
 
-transform.sequence  failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %arg0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{transform requires empty or size-3 grid_dims}}
-  %5 = transform.gpu.map_forall_to_blocks %arg1 generate_gpu_launch grid_dims = [50, 16] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %arg0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{transform requires empty or size-3 grid_dims}}
+    %5 = transform.gpu.map_forall_to_blocks %arg1 generate_gpu_launch grid_dims = [50, 16] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -366,11 +394,13 @@ func.func public @missing_mapping_attribute(%arg0: memref<32x32xf32>, %arg1: mem
   return
 }
 
-transform.sequence  failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %arg0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{scf.forall op requires a mapping attribute}}
-  %5 = transform.gpu.map_forall_to_blocks %arg1 generate_gpu_launch grid_dims = [50, 16, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %arg0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{scf.forall op requires a mapping attribute}}
+    %5 = transform.gpu.map_forall_to_blocks %arg1 generate_gpu_launch grid_dims = [50, 16, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -382,11 +412,13 @@ func.func public @not_a_block_mapping_attribute(%arg0: memref<32x32xf32>, %arg1:
   return
 }
 
-transform.sequence  failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %arg0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{scf.forall op requires a mapping attribute of kind 'block'}}
-  %5 = transform.gpu.map_forall_to_blocks %arg1 generate_gpu_launch grid_dims = [50, 16, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %arg0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{scf.forall op requires a mapping attribute of kind 'block'}}
+    %5 = transform.gpu.map_forall_to_blocks %arg1 generate_gpu_launch grid_dims = [50, 16, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -411,9 +443,11 @@ func.func @not_a_thread_or_warp_mapping_attribute(%x: memref<2 x 32 x f32>, %y:
   return %y : memref<2 x 32 x f32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{scf.forall op requires a mapping attribute of kind 'thread' or 'warp'}}
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [1, 1, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{scf.forall op requires a mapping attribute of kind 'thread' or 'warp'}}
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [1, 1, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/GPU/transform-gpu.mlir b/mlir/test/Dialect/GPU/transform-gpu.mlir
index de42c266c34f457..72572c6a38de127 100644
--- a/mlir/test/Dialect/GPU/transform-gpu.mlir
+++ b/mlir/test/Dialect/GPU/transform-gpu.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter --split-input-file  -canonicalize -cse %s | FileCheck %s
+// RUN: mlir-opt --transform-interpreter --split-input-file  -canonicalize -cse %s | FileCheck %s
 
 !type = memref<2 x 32 x f32>
 !type1d = memref<32 x f32>
@@ -30,10 +30,12 @@ func.func @blocks_3d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream :
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_forall_to_blocks %funcop grid_dims = [12, 9, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_forall_to_blocks %funcop grid_dims = [12, 9, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -41,7 +43,7 @@ transform.sequence failures(propagate) {
 !type = memref<2 x 32 x f32>
 !type1d = memref<32 x f32>
 
-// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 floordiv 128)> 
+// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 floordiv 128)>
 
 // CHECK-LABEL: func.func @warpgroup_3d(
 // CHECK-SAME:    %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@@ -80,10 +82,12 @@ func.func @warpgroup_3d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [512, 2, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [512, 2, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -91,7 +95,7 @@ transform.sequence failures(propagate) {
 !type = memref<2 x 32 x f32>
 !type1d = memref<32 x f32>
 
-// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 floordiv 16)> 
+// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 floordiv 16)>
 
 // CHECK-LABEL: func.func @warp_3d(
 // CHECK-SAME:    %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@@ -131,10 +135,12 @@ func.func @warp_3d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream : !g
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [64, 4, 3] warp_size = 16: (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [64, 4, 3] warp_size = 16: (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -185,10 +191,12 @@ func.func @threads_3d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream :
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [12, 9, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [12, 9, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -224,11 +232,13 @@ func.func @saxpy4d(%x: !type4d, %y: !type4d, %alpha : f32) -> !type4d {
   return %y : !type4d
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  %gpuLaunch = transform.gpu.map_forall_to_blocks %funcop { generate_gpu_launch } : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %gpuLaunch block_dims = [32, 4, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    %gpuLaunch = transform.gpu.map_forall_to_blocks %funcop { generate_gpu_launch } : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %gpuLaunch block_dims = [32, 4, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -258,10 +268,12 @@ func.func @saxpy2d_no_barrier(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [12, 9, 1] sync_after_distribute = false : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [12, 9, 1] sync_after_distribute = false : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -290,10 +302,12 @@ func.func @saxpy2d_singleloop(%x: !type, %y: !type, %stream : !gpu.async.token)
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 1, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 1, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -326,10 +340,12 @@ func.func @saxpy3d_fold_id_z(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %s
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [12, 9, 1] sync_after_distribute = false : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [12, 9, 1] sync_after_distribute = false : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 
@@ -381,10 +397,12 @@ func.func @warpgroup_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %st
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 8, 4] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 8, 4] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -435,10 +453,12 @@ func.func @warp_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 8, 4] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %funcop block_dims = [32, 8, 4] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -511,11 +531,13 @@ func.func @map_multi_level_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f3
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %funcop
-    block_dims = [18, 11, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %funcop
+      block_dims = [18, 11, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -565,10 +587,12 @@ func.func @block_linear_existing_launch(
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_forall_to_blocks %funcop grid_dims = [12, 9, 1] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_forall_to_blocks %funcop grid_dims = [12, 9, 1] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -610,16 +634,18 @@ func.func @block_linear_generate_launch(
   return %y : !type
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %funcop = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_forall_to_blocks %funcop generate_gpu_launch : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %funcop = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_forall_to_blocks %funcop generate_gpu_launch : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
 
-#map = affine_map<(d0) -> (d0 *  128)>                             
-#map1 = affine_map<(d0) -> (d0 * 32)> 
+#map = affine_map<(d0) -> (d0 *  128)>
+#map1 = affine_map<(d0) -> (d0 * 32)>
 
 // CHECK-DAG: #[[$MAPB:.*]] = affine_map<(d0) -> (d0 * 128)>
 // CHECK-DAG: #[[$MAPW:.*]] = affine_map<(d0, d1, d2) -> (d2 * 32 + ((d0 + d1 * 4) floordiv 32) * 32)>
@@ -629,9 +655,9 @@ func.func @simple_fill(%arg0: memref<128xf32>) -> memref<128xf32> {
   %c0 = arith.constant 0 : index
   %cst = arith.constant dense<0.000000e+00> : vector<32xf32>
 //       CHECK:   %[[C1:.*]] = arith.constant 1 : index
-//       CHECK:   %[[C4:.*]] = arith.constant 4 : index       
-//       CHECK:   %[[C8:.*]] = arith.constant 8 : index        
-//       CHECK:   gpu.launch 
+//       CHECK:   %[[C4:.*]] = arith.constant 4 : index
+//       CHECK:   %[[C8:.*]] = arith.constant 8 : index
+//       CHECK:   gpu.launch
   scf.forall (%arg1) in (1) {
 //       CHECK:     %[[BIDX:.*]] = gpu.block_id  x
 //       CHECK:     %[[BLX:.*]] = affine.apply #[[$MAPB]](%[[BIDX]])
@@ -654,12 +680,14 @@ func.func @simple_fill(%arg0: memref<128xf32>) -> memref<128xf32> {
   return %arg0 : memref<128xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %func = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
-  %gpu_launch = transform.gpu.map_forall_to_blocks %func generate_gpu_launch 
-    : (!transform.any_op) -> !transform.any_op
-  transform.gpu.map_nested_forall_to_threads %gpu_launch block_dims = [4, 8, 4] 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %func = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
+    %gpu_launch = transform.gpu.map_forall_to_blocks %func generate_gpu_launch
+      : (!transform.any_op) -> !transform.any_op
+    transform.gpu.map_nested_forall_to_threads %gpu_launch block_dims = [4, 8, 4]
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-target-tag.mlir b/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-target-tag.mlir
index c25212fbe98782b..cc9759f62431f1f 100644
--- a/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-target-tag.mlir
+++ b/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-target-tag.mlir
@@ -2,7 +2,9 @@
 
 // RUN: mlir-opt %s -test-lower-to-llvm -cse | FileCheck %s
 
-// RUN: mlir-opt %s -test-transform-dialect-interpreter="transform-library-paths=%p/lower-to-llvm-transform-symbol-def.mlir debug-payload-root-tag=payload" \
+// RUN: mlir-opt %s \
+// RUN:   -transform-preload-library="transform-library-paths=%p/lower-to-llvm-transform-symbol-def.mlir" \
+// RUN:   -transform-interpreter="debug-payload-root-tag=payload" \
 // RUN:   -test-transform-dialect-erase-schedule -cse \
 // RUN: | FileCheck %s
 
@@ -56,9 +58,9 @@ func.func @subview(%0 : memref<64x4xf32, strided<[4, 1], offset: 0>>, %arg0 : in
 module @named_inclusion_in_named attributes { transform.with_named_sequence } {
   transform.named_sequence private @lower_to_cpu(!transform.any_op {transform.consumed}) -> !transform.any_op
 
-  transform.sequence failures(propagate) {
-  ^bb1(%toplevel_module: !transform.any_op):
+  transform.named_sequence @__transform_main(%toplevel_module: !transform.any_op {transform.consumed}) {
     %m2 = transform.include @lower_to_cpu failures(suppress) (%toplevel_module) 
       : (!transform.any_op) -> (!transform.any_op)
+    transform.yield
   }
 }
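
The split RUN line above decouples loading the transform module from applying
it: -transform-preload-library makes the named sequences from the given file
available to the interpreter, and debug-payload-root-tag selects which payload
op @__transform_main is applied to. A minimal sketch of a tagged payload root
(illustrative; transform.target_tag is the attribute that option is matched
against):

  module attributes { transform.target_tag = "payload" } {
    func.func @f(%arg0: i32) -> i32 {
      return %arg0 : i32
    }
  }
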
diff --git a/mlir/test/Dialect/LLVM/transform-e2e.mlir b/mlir/test/Dialect/LLVM/transform-e2e.mlir
index 86691e15dc58f42..adbbbba32a40f8f 100644
--- a/mlir/test/Dialect/LLVM/transform-e2e.mlir
+++ b/mlir/test/Dialect/LLVM/transform-e2e.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter -test-transform-dialect-erase-schedule --test-lower-to-llvm --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter -test-transform-dialect-erase-schedule --test-lower-to-llvm --split-input-file | FileCheck %s
 
 // CHECK-LABEL: llvm.func @matmul_tensors
 func.func @matmul_tensors(
@@ -12,29 +12,31 @@ func.func @matmul_tensors(
   return %0 : tensor<2x6xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %module_op : (!transform.any_op) -> !transform.any_op
-  %1, %loops:3 = transform.structured.tile_using_for %0 [2, 2, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-  %2 = get_parent_op %1 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize_children_and_apply_patterns %2 : (!transform.any_op) -> !transform.any_op
-  %b = transform.bufferization.one_shot_bufferize layout{IdentityLayoutMap}
-      %module_op {bufferize_function_boundaries = true}
-      : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.consumed}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %module_op : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.tile_using_for %0 [2, 2, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %2 = transform.get_parent_op %1 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize_children_and_apply_patterns %2 : (!transform.any_op) -> !transform.any_op
+    %b = transform.bufferization.one_shot_bufferize layout{IdentityLayoutMap}
+        %module_op {bufferize_function_boundaries = true}
+        : (!transform.any_op) -> !transform.any_op
 
-  %f = transform.structured.match ops{["func.func"]} in %b
-    : (!transform.any_op) -> !transform.any_op
+    %f = transform.structured.match ops{["func.func"]} in %b
+      : (!transform.any_op) -> !transform.any_op
 
-  // TODO: group these lower-level controls into various properly named vector
-  // lowering TD macros.
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
-    transform.apply_patterns.vector.transfer_permutation_patterns
-    transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerparallel"
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
-    transform.apply_patterns.vector.transfer_to_scf max_transfer_rank = 1 full_unroll = true
-    transform.apply_patterns.vector.lower_transfer max_transfer_rank = 1
-    transform.apply_patterns.vector.lower_shape_cast
-    transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_1d"
-  } : !transform.any_op
+    // TODO: group these lower-level controls into various properly named vector
+    // lowering TD macros.
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
+      transform.apply_patterns.vector.transfer_permutation_patterns
+      transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerparallel"
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
+      transform.apply_patterns.vector.transfer_to_scf max_transfer_rank = 1 full_unroll = true
+      transform.apply_patterns.vector.lower_transfer max_transfer_rank = 1
+      transform.apply_patterns.vector.lower_shape_cast
+      transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_1d"
+    } : !transform.any_op
+    transform.yield
+  }
 }
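
Note that this schedule takes its root as {transform.consumed}: one-shot
bufferization replaces the payload module, invalidating the incoming handle,
which is why all later matches go through the result handle %b. A minimal
sketch of the convention (illustrative, not a hunk from this patch):

  transform.named_sequence @consume_example(%root: !transform.any_op {transform.consumed}) {
    // %root is invalidated past this point; only %b may be used.
    %b = transform.bufferization.one_shot_bufferize %root
        : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
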
diff --git a/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir b/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir
index b2470ed7b748042..a6431996353121c 100644
--- a/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir
+++ b/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics | FileCheck %s
 
 // Check that the im2col patterns are properly connected with the
 // transform dialect.
@@ -15,11 +15,13 @@ func.func @conv_non_static(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<3x3x4x16xf3
     return %0 : tensor<?x?x?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error@below {{failed to apply}}
-  %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error@below {{failed to apply}}
+    %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -87,12 +89,14 @@ func.func @conv_16433136(%arg0: tensor<1x16x16x4xf32>, %arg1: tensor<3x3x4x16xf3
     return %0 : tensor<1x14x14x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
-  transform.print %transformed {name = "transformed"}: !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
+    transform.print %transformed {name = "transformed"}: !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -166,10 +170,12 @@ func.func @depthwise_conv_hwc_114x16x3(%input: tensor<1x114x114x16xf32>, %filter
     return %0 : tensor<1x112x112x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.depthwise_conv_2d_nhwc_hwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.depthwise_conv_2d_nhwc_hwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -208,10 +214,12 @@ func.func @batch_nhwc_conv(%arg0: tensor<8x16x16x4xf32>, %arg1: tensor<3x3x4x16x
     return %0 : tensor<8x14x14x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -271,10 +279,12 @@ func.func @batch_nchw_conv(%arg0: tensor<8x4x16x16xf32>, %arg1: tensor<16x4x3x3x
     return %0 : tensor<8x16x14x14xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.conv_2d_nchw_fchw"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nchw_fchw"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -339,12 +349,14 @@ func.func @conv_2d_nhwc_fhwc(%arg0: tensor<1x16x16x4xf32>, %arg1: tensor<16x3x3x
     return %0 : tensor<1x14x14x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_fhwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
-  transform.print %transformed {name = "transformed"}: !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_fhwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
+    transform.print %transformed {name = "transformed"}: !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -377,12 +389,14 @@ func.func @conv_integer_extend(%arg0: tensor<1x16x16x4xi8>, %arg1: tensor<3x3x4x
     return %0 : tensor<1x14x14x16xi32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
-  transform.print %transformed {name = "transformed"}: !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
+    transform.print %transformed {name = "transformed"}: !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -413,12 +427,14 @@ func.func @conv_complex(%arg0: tensor<1x16x16x4xcomplex<f32>>, %arg1: tensor<3x3
     return %0 : tensor<1x14x14x16xcomplex<f32>>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
-  transform.print %transformed {name = "transformed"}: !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
+    transform.print %transformed {name = "transformed"}: !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -454,12 +470,14 @@ func.func @conv_complex_extended(%arg0: tensor<1x16x16x4xcomplex<f32>>, %arg1: t
     return %0 : tensor<1x14x14x16xcomplex<f32>>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
-  transform.print %transformed {name = "transformed"}: !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
+    transform.print %transformed {name = "transformed"}: !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -493,10 +511,12 @@ func.func @conv_complex_f16_extended(%arg0: tensor<1x16x16x4xcomplex<f32>>, %arg
     return %0 : tensor<1x14x14x16xcomplex<f32>>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
-  transform.print %transformed {name = "transformed"}: !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
+    transform.print %transformed {name = "transformed"}: !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/generalize-tensor-pack-tile.mlir b/mlir/test/Dialect/Linalg/generalize-tensor-pack-tile.mlir
index a8c3c992e11c8b4..d63433248ab1e00 100644
--- a/mlir/test/Dialect/Linalg/generalize-tensor-pack-tile.mlir
+++ b/mlir/test/Dialect/Linalg/generalize-tensor-pack-tile.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -split-input-file --test-transform-dialect-interpreter --canonicalize --test-linalg-transform-patterns="test-generalize-tensor-pack"  %s | FileCheck %s
+// RUN: mlir-opt -split-input-file --transform-interpreter --canonicalize --test-linalg-transform-patterns="test-generalize-tensor-pack"  %s | FileCheck %s
 
 func.func @KCRS_to_KCRSsr(%arg0: tensor<1x1x128x64xf32>, %arg1: tensor<1x1x4x8x8x32xf32>) -> tensor<1x1x4x8x8x32xf32> {
   %0 = tensor.pack %arg0 inner_dims_pos = [3, 2] inner_tiles = [8, 32] into %arg1 : tensor<1x1x128x64xf32> -> tensor<1x1x4x8x8x32xf32>
@@ -24,10 +24,12 @@ func.func @KCRS_to_KCRSsr(%arg0: tensor<1x1x128x64xf32>, %arg1: tensor<1x1x4x8x8
 // CHECK-SAME:          permutation = [1, 0]
 // CHECK:             %{{.+}} = tensor.insert_slice %[[TRANSP]] into %{{.+}}
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:4 = transform.structured.tile_using_for %0 [1, 1, 1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:4 = transform.structured.tile_using_for %0 [1, 1, 1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -53,10 +55,12 @@ func.func @pad_and_pack(%arg0: tensor<13x15xf32>, %arg1: tensor<2x8x8x2xf32>, %a
 // CHECK-SAME:      permutation = [0, 1]
 // CHECK:         %{{.+}} = tensor.insert_slice %[[TRANSP]] into %{{.+}}
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -86,8 +90,10 @@ func.func @KC_to_CKkc(%arg0: tensor<128x256xf32>, %arg1: tensor<32x4x32x8xf32>)
 // CHECK-SAME:          [0, 0, 0, 0] [1, 1, 32, 8] [1, 1, 1, 1] : tensor<32x8xf32> into tensor<1x1x32x8xf32>
 // CHECK:             %{{.+}} = tensor.insert_slice %[[SUB_ITER]] into %{{[a-zA-Z0-9]+}}
 // CHECK-SAME:          [%[[C]], %[[K]], 0, 0] [1, 1, 32, 8] [1, 1, 1, 1] : tensor<1x1x32x8xf32> into tensor<32x4x32x8xf32>
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/generalize-tensor-unpack-tile.mlir b/mlir/test/Dialect/Linalg/generalize-tensor-unpack-tile.mlir
index 5d38844568e1602..0e27c6a783e6f1a 100644
--- a/mlir/test/Dialect/Linalg/generalize-tensor-unpack-tile.mlir
+++ b/mlir/test/Dialect/Linalg/generalize-tensor-unpack-tile.mlir
@@ -1,14 +1,16 @@
-// RUN: mlir-opt -split-input-file --test-transform-dialect-interpreter --canonicalize --test-linalg-transform-patterns="test-generalize-tensor-unpack"  %s | FileCheck %s
+// RUN: mlir-opt -split-input-file --transform-interpreter --canonicalize --test-linalg-transform-patterns="test-generalize-tensor-unpack"  %s | FileCheck %s
 
 func.func @KCRSsr_to_KCRS(%arg0: tensor<1x1x4x8x8x32xf32>, %arg1: tensor<1x1x128x64xf32>) -> tensor<1x1x128x64xf32> {
   %0 = tensor.unpack %arg0 inner_dims_pos = [3, 2] inner_tiles = [8, 32] into %arg1 : tensor<1x1x4x8x8x32xf32> -> tensor<1x1x128x64xf32>
   return %0 : tensor<1x1x128x64xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:4 = transform.structured.tile_using_for %0 [1, 1, 32, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 // CHECK-DAG:   #[[MAP0:.+]] = affine_map<(d0) -> (d0 floordiv 32)>
 // CHECK-DAG:   #[[MAP1:.+]] = affine_map<(d0) -> (d0 floordiv 8)>
@@ -67,10 +69,12 @@ func.func @unpack_and_extract_slice(%arg0: tensor<2x8x8x2xf32>, %arg1: tensor<13
 // CHECK:             %[[INSERT2:.+]] = tensor.insert_slice %[[INSERT1]] into %{{[a-zA-Z0-9]+}}
 // CHECK-SAME:          [%[[I]], %[[J]]] [%[[OUT_I_SZ]], %[[OUT_J_SZ]]] [1, 1]
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:2 = transform.structured.tile_using_for %0 [8, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -101,8 +105,10 @@ func.func @CKkc_to_KC(%arg0: tensor<32x4x32x8xf32>, %arg1: tensor<128x256xf32>)
 // CHECK-SAME:          [%[[K]], %[[C]]] [32, 8] [1, 1]
 
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:2 = transform.structured.tile_using_for %0 [32, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/hoisting.mlir b/mlir/test/Dialect/Linalg/hoisting.mlir
index 11bf4b58b95c82e..3623952a08df024 100644
--- a/mlir/test/Dialect/Linalg/hoisting.mlir
+++ b/mlir/test/Dialect/Linalg/hoisting.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt  -test-transform-dialect-interpreter -canonicalize --split-input-file --allow-unregistered-dialect %s | FileCheck %s
+// RUN: mlir-opt  -transform-interpreter -canonicalize --split-input-file --allow-unregistered-dialect %s | FileCheck %s
 
 // CHECK-LABEL: func @hoist_vector_transfer_pairs(
 //  CHECK-SAME:   %[[MEMREF0:[a-zA-Z0-9]*]]: memref<?x?xf32>,
@@ -74,12 +74,14 @@ func.func @hoist_vector_transfer_pairs(
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -163,12 +165,14 @@ func.func @hoist_vector_transfer_pairs_disjoint(
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -208,12 +212,14 @@ func.func @hoist_vector_transfer_pairs_in_affine_loops(%memref0: memref<64x64xi3
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -297,12 +303,14 @@ func.func @hoist_vector_transfer_pairs_tensor(
         tensor<?x?xf32>, tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_tensor_subsets %0
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_tensor_subsets %0
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
 
 // -----
@@ -392,12 +400,14 @@ func.func @hoist_vector_transfer_pairs_disjoint_tensor(
   return %0#0,  %0#1, %0#2, %0#3 : tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_tensor_subsets %0
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_tensor_subsets %0
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
 
 // -----
@@ -509,12 +519,14 @@ func.func @hoist_vector_transfer_pairs_tensor_and_slices(
   return %0#0, %0#1, %0#2 : tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_tensor_subsets %0
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_tensor_subsets %0
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
 
 // -----
@@ -556,12 +568,14 @@ func.func @hoist_vector_transfer_write_pairs_disjoint_tensor(
   return %1 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_tensor_subsets %0
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_tensor_subsets %0
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
 
 // -----
@@ -669,12 +683,14 @@ func.func @hoist_vector_transfer_pairs_tensor_and_slices_static_large_tensor(
   return %0#0, %0#1, %0#2 : tensor<100x100xf32>, tensor<200x200xf32>, tensor<300x300xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_tensor_subsets %0
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_tensor_subsets %0
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
 
 // -----
@@ -715,12 +731,14 @@ func.func @hoist_vector_transfer_read() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -749,12 +767,14 @@ func.func @non_matching_transfers(%m: memref<6x1x7x32xf32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -786,12 +806,14 @@ func.func @no_hoisting_collapse_shape(%in_0: memref<1x20x1xi32>, %1: memref<9x1x
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -820,12 +842,14 @@ func.func @no_hoisting_collapse_shape_2(%vec: vector<1x12x1xi32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -865,12 +889,14 @@ func.func @no_hoisting_write_to_memref(%rhs: i32, %arg1: vector<1xi32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -919,12 +945,14 @@ func.func @hoist_vector_transfer_pairs_disjoint_dynamic(
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -955,12 +983,14 @@ func.func @hoist_vector_transfer_pairs_overlapping_dynamic(
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -997,10 +1027,12 @@ func.func @hoist_vector_transfer_pairs_disjoint_dynamic(
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.hoist_redundant_vector_transfers %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_redundant_vector_transfers %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
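
For reference, every hunk in this file applies the same mechanical rewrite; a
minimal sketch of the pattern, with the transform ops elided:

  // Before: a top-level unnamed sequence.
  transform.sequence failures(propagate) {
  ^bb1(%arg1: !transform.any_op):
    // ... transform ops ...
  }

  // After: a named sequence in a module, located by its symbol name.
  module attributes {transform.with_named_sequence} {
    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      // ... transform ops ...
      transform.yield
    }
  }
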
diff --git a/mlir/test/Dialect/Linalg/match-ops-interpreter.mlir b/mlir/test/Dialect/Linalg/match-ops-interpreter.mlir
index 1da092ab42ad779..9489aadac843d7b 100644
--- a/mlir/test/Dialect/Linalg/match-ops-interpreter.mlir
+++ b/mlir/test/Dialect/Linalg/match-ops-interpreter.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --pass-pipeline="builtin.module(test-transform-dialect-interpreter{debug-payload-root-tag=start_here})" --split-input-file --verify-diagnostics
+// RUN: mlir-opt %s --pass-pipeline="builtin.module(transform-interpreter{debug-payload-root-tag=start_here})" --split-input-file --verify-diagnostics
 
 module attributes { transform.with_named_sequence } {
   transform.named_sequence @print_structured(%arg0: !transform.any_op {transform.readonly}) {
@@ -15,11 +15,11 @@ module attributes { transform.with_named_sequence } {
   }
 
  // Entry point. Match any structured operation and emit a remark.
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     transform.foreach_match in %arg0
         @match_structured_empty -> @print_structured
         : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 
   func.func @payload() attributes { transform.target_tag = "start_here" } {
@@ -60,11 +60,11 @@ module attributes { transform.with_named_sequence } {
     transform.yield %arg0 : !transform.any_op
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     transform.foreach_match in %arg0
         @print_in_matcher -> @do_nothing
         : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 
   func.func @payload() attributes { transform.target_tag = "start_here" } {
@@ -99,11 +99,11 @@ module attributes { transform.with_named_sequence } {
     transform.yield %0 : !transform.any_op
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     transform.foreach_match in %arg0
         @match_structured_suppress -> @do_nothing
         : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 
   func.func @payload() attributes { transform.target_tag = "start_here" } {
@@ -144,11 +144,11 @@ module attributes { transform.with_named_sequence } {
     transform.yield %0 : !transform.any_op
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     transform.foreach_match in %arg0
         @match_structured_body_passthrough -> @print_passthrough
         : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 
   func.func @payload(%in: tensor<2xf32>, %out: tensor<2xf32>) attributes { transform.target_tag = "start_here" } {
@@ -194,11 +194,11 @@ module attributes { transform.with_named_sequence } {
     transform.yield %0 : !transform.any_op
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     transform.foreach_match in %arg0
         @match_structured_body_reduction -> @print_reduction
         : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 
   func.func @payload(%lhs: tensor<2x4xf32>, %rhs: tensor<4x3xf32>, %out: tensor<2x3xf32>) attributes { transform.target_tag = "start_here" } {
@@ -301,10 +301,10 @@ module attributes { transform.with_named_sequence } {
     transform.yield %0 : !transform.any_op
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     %0 = transform.foreach_match in %arg0 @match_dimension_capture -> @do_nothing : (!transform.any_op) -> !transform.any_op
     %1 = transform.foreach_match in %0 @match_dimension_sizes -> @print_dimension_size_match : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 
   func.func @payload(%lhs: tensor<2x4xf32>, %rhs: tensor<4x3xf32>, %out: tensor<2x3xf32>) attributes { transform.target_tag = "start_here" } {
@@ -386,12 +386,12 @@ module attributes { transform.with_named_sequence } {
     transform.yield %arg0 : !transform.any_op
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     %0 = transform.foreach_match in %arg0 @match_all_reduction -> @print_all_reduction : (!transform.any_op) -> !transform.any_op
     %1 = transform.foreach_match in %0 @match_all_parallel -> @print_all_parallel : (!transform.any_op) -> !transform.any_op
     %2 = transform.foreach_match in %1 @match_last_reduction -> @print_last_reduction : (!transform.any_op) -> !transform.any_op
     %3 = transform.foreach_match in %2 @match_parallel_except_last -> @print_parallel_except_last : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 
   func.func @payload(%lhs: tensor<2x4xf32>, %rhs: tensor<4x3xf32>, %out: tensor<2x3xf32>) attributes { transform.target_tag = "start_here" } {
@@ -438,8 +438,7 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     transform.foreach_match in %arg0 @match_bitwidth -> @print_bitwidth : (!transform.any_op) -> !transform.any_op
     transform.yield
   }
@@ -480,8 +479,7 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     transform.foreach_match in %arg0 @match_init -> @print_init : (!transform.any_op) -> !transform.any_op
     transform.yield
   }
@@ -557,8 +555,7 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     %0 = transform.foreach_match in %arg0 @match_init_0_permutation -> @print_init_0_permutation : (!transform.any_op) -> !transform.any_op
     %1 = transform.foreach_match in %0 @match_init_1_permutation -> @print_init_1_permutation : (!transform.any_op) -> !transform.any_op
     %2 = transform.foreach_match in %1 @match_init_2_projected_permutation -> @print_init_2_projected_permutation : (!transform.any_op) -> !transform.any_op
@@ -626,8 +623,7 @@ module attributes { transform.with_named_sequence } {
   }
 
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     %0 = transform.foreach_match in %arg0 @match_num_io -> @print_num_io : (!transform.any_op) -> !transform.any_op
     transform.yield
   }
@@ -687,8 +683,7 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     %0 = transform.foreach_match in %arg0 @match_rank -> @print_rank : (!transform.any_op) -> !transform.any_op
     transform.yield
   }
@@ -757,8 +752,7 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     %0 = transform.foreach_match in %arg0 @match_single_result -> @print_single_result : (!transform.any_op) -> !transform.any_op
     %1 = transform.foreach_match in %0 @match_result_value -> @print_result_value : (!transform.any_op) -> !transform.any_op
     %2 = transform.foreach_match in %1 @match_any_result -> @print_any_result : (!transform.any_op) -> !transform.any_op
@@ -838,8 +832,7 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     %3 = transform.foreach_match in %arg0 @match_input_indexing_map -> @print_indexing_map_1 : (!transform.any_op) -> !transform.any_op
     %4 = transform.foreach_match in %3 @match_init_indexing_map -> @print_indexing_map_2 : (!transform.any_op) -> !transform.any_op
     transform.yield
@@ -889,8 +882,7 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     %3 = transform.foreach_match in %arg0 @match_contraction -> @print_contraction : (!transform.any_op) -> !transform.any_op
     transform.yield
   }
@@ -986,8 +978,7 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
     %3 = transform.foreach_match in %arg0 @match_convolution -> @print_convolution : (!transform.any_op) -> !transform.any_op
     transform.yield
   }
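
This test drives the interpreter against a tagged payload op rather than the
enclosing module. Condensed from the lines above (trailing flags abbreviated):

  // RUN: mlir-opt %s --pass-pipeline="builtin.module(transform-interpreter{debug-payload-root-tag=start_here})" ...

  // The interpreter anchors on the op carrying the matching tag:
  func.func @payload() attributes { transform.target_tag = "start_here" } {
    // ... payload IR the matchers run against ...
  }

Note also that the entry points here take %arg0 with {transform.consumed}
rather than {transform.readonly}, presumably because transform.foreach_match
consumes its root handle and yields an updated one.
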
diff --git a/mlir/test/Dialect/Linalg/matmul-shared-memory-padding.mlir b/mlir/test/Dialect/Linalg/matmul-shared-memory-padding.mlir
index c1ef5a794810636..c3ac69f65b7c6cb 100644
--- a/mlir/test/Dialect/Linalg/matmul-shared-memory-padding.mlir
+++ b/mlir/test/Dialect/Linalg/matmul-shared-memory-padding.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --split-input-file --test-transform-dialect-interpreter="enable-expensive-checks=1" %s | FileCheck %s
+// RUN: mlir-opt --split-input-file --transform-interpreter %s | FileCheck %s
 
 // CHECK-LABEL: func @matmul_divisible
 //       CHECK:   scf.forall
@@ -39,81 +39,83 @@ func.func @matmul_divisible(%A: tensor<1024x1024xf32>,
   return %1 : tensor<1024x1024xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  // Fuse linalg.fill into linalg.matmul and tile.
-  %matmul_op = transform.structured.match ops{["linalg.matmul"]} in %arg1
-      : (!transform.any_op) -> !transform.any_op
-  %fill_op = transform.structured.match ops{["linalg.fill"]} in %arg1
-      : (!transform.any_op) -> !transform.any_op
-  %tiled_matmul_op, %forall_op = transform.structured.tile_using_forall %matmul_op num_threads [] tile_sizes [128, 128](mapping = [#gpu.block<y>, #gpu.block<x>])
-      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %fused_op, %new_containing_op = transform.structured.fuse_into_containing_op %fill_op into %forall_op
-      : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
-
-  // Tile linalg.matmul a second time.
-  %tiled_linalg_op, %loops = transform.structured.tile_using_for %tiled_matmul_op[0, 0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-
-  // Pad linalg.matmul.
-  %padded, %pad, %copy_back = transform.structured.pad %tiled_linalg_op
-      {padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-       padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 1],
-       copy_back_op = "linalg.copy"}
-      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
-
-  // Map and tile tensor.pad.
-  %pad_forall_op, %tiled_pad_op = transform.structured.gpu.map_copy_to_threads
-      %pad total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  transform.foreach %pad_forall_op : !transform.any_op {
-  ^bb2(%arg2 : !transform.any_op):
-    %if_op = transform.structured.match ops{["scf.if"]} in %arg2
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    // Fuse linalg.fill into linalg.matmul and tile.
+    %matmul_op = transform.structured.match ops{["linalg.matmul"]} in %arg1
+        : (!transform.any_op) -> !transform.any_op
+    %fill_op = transform.structured.match ops{["linalg.fill"]} in %arg1
+        : (!transform.any_op) -> !transform.any_op
+    %tiled_matmul_op, %forall_op = transform.structured.tile_using_forall %matmul_op num_threads [] tile_sizes [128, 128](mapping = [#gpu.block<y>, #gpu.block<x>])
+        : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %fused_op, %new_containing_op = transform.structured.fuse_into_containing_op %fill_op into %forall_op
+        : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+
+    // Tile linalg.matmul a second time.
+    %tiled_linalg_op, %loops = transform.structured.tile_using_for %tiled_matmul_op[0, 0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+
+    // Pad linalg.matmul.
+    %padded, %pad, %copy_back = transform.structured.pad %tiled_linalg_op
+        {padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+         padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 1],
+         copy_back_op = "linalg.copy"}
+        : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+
+    // Map and tile tensor.pad.
+    %pad_forall_op, %tiled_pad_op = transform.structured.gpu.map_copy_to_threads
+        %pad total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.foreach %pad_forall_op : !transform.any_op {
+    ^bb2(%arg2 : !transform.any_op):
+      %if_op = transform.structured.match ops{["scf.if"]} in %arg2
+          : (!transform.any_op) -> !transform.any_op
+      // TODO: The scf.if can be avoided with 0x... tensors.
+      transform.scf.take_assumed_branch %if_op take_else_branch
+          : (!transform.any_op) -> ()
+    }
+
+    // Map and tile copy back.
+    %copy_forall_op, %tiled_copy_op = transform.structured.gpu.map_copy_to_threads
+        %copy_back total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+
+    // Apply masked vectorization to padding ops.
+    transform.structured.vectorize %tiled_pad_op vector_sizes [128, 4]
+        : !transform.any_op
+
+    // Assign shared memory buffer to padding.
+    %buffer, %new_ops = transform.structured.bufferize_to_allocation
+        %pad_forall_op {memory_space = 3, bufferize_destination_only, emit_dealloc}
+        : !transform.any_op
+
+    // Bufferize.
+    %func_op_1 = transform.structured.match ops{["func.func"]} in %arg1
+        : (!transform.any_op) -> !transform.any_op
+    transform.bufferization.eliminate_empty_tensors %func_op_1 : !transform.any_op
+    transform.apply_dce to %func_op_1 : !transform.any_op
+    transform.apply_cse to %func_op_1 : !transform.any_op
+    %bufferized = transform.bufferization.one_shot_bufferize
+        layout{IdentityLayoutMap} %arg1 {bufferize_function_boundaries=true}
         : (!transform.any_op) -> !transform.any_op
-    // TODO: The scf.if can be avoided with 0x... tensors.
-    transform.scf.take_assumed_branch %if_op take_else_branch
-        : (!transform.any_op) -> ()
-  }
 
-  // Map and tile copy back.
-  %copy_forall_op, %tiled_copy_op = transform.structured.gpu.map_copy_to_threads
-      %copy_back total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-
-  // Apply masked vectorization to padding ops.
-  transform.structured.vectorize %tiled_pad_op vector_sizes [128, 4]
-      : !transform.any_op
-
-  // Assign shared memory buffer to padding.
-  %buffer, %new_ops = transform.structured.bufferize_to_allocation
-      %pad_forall_op {memory_space = 3, bufferize_destination_only, emit_dealloc}
-      : !transform.any_op
-
-  // Bufferize.
-  %func_op_1 = transform.structured.match ops{["func.func"]} in %arg1
-      : (!transform.any_op) -> !transform.any_op
-  transform.bufferization.eliminate_empty_tensors %func_op_1 : !transform.any_op
-  transform.apply_dce to %func_op_1 : !transform.any_op
-  transform.apply_cse to %func_op_1 : !transform.any_op
-  %bufferized = transform.bufferization.one_shot_bufferize
-      layout{IdentityLayoutMap} %arg1 {bufferize_function_boundaries=true}
-      : (!transform.any_op) -> !transform.any_op
-
-  // Apply vectorization to copy back from shared memory.
-  // TODO: Find a way to retain the handle to linalg.copy throughout
-  // bufferization.
-  %func_op_2 = transform.structured.match ops{["func.func"]} in %bufferized
-      : (!transform.any_op) -> !transform.any_op
-  %bufferized_copy_back = transform.structured.match ops{["linalg.copy"]} in %func_op_2
-      : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize
-      %bufferized_copy_back vector_sizes [128, 4] : !transform.any_op
-
-  // Canonicalize, cleanup and vector lowering. This step also removes buffer
-  // self-copies.
-  transform.apply_patterns to %func_op_2 {
-    transform.apply_patterns.canonicalization
-    transform.apply_patterns.vector.lower_masked_transfers
-  } {apply_cse} : !transform.any_op
+    // Apply vectorization to copy back from shared memory.
+    // TODO: Find a way to retain the handle to linalg.copy throughout
+    // bufferization.
+    %func_op_2 = transform.structured.match ops{["func.func"]} in %bufferized
+        : (!transform.any_op) -> !transform.any_op
+    %bufferized_copy_back = transform.structured.match ops{["linalg.copy"]} in %func_op_2
+        : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize
+        %bufferized_copy_back vector_sizes [128, 4] : !transform.any_op
+
+    // Canonicalization, cleanup, and vector lowering. This step also removes buffer
+    // self-copies.
+    transform.apply_patterns to %func_op_2 {
+      transform.apply_patterns.canonicalization
+      transform.apply_patterns.vector.lower_masked_transfers
+    } {apply_cse} : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -156,74 +158,76 @@ func.func @matmul_not_divisible(%A: tensor<1023x1023xf32>,
   return %1 : tensor<1023x1023xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  // Fuse linalg.fill into linalg.matmul and tile.
-  %matmul_op = transform.structured.match ops{["linalg.matmul"]} in %arg1
-      : (!transform.any_op) -> !transform.any_op
-  %fill_op = transform.structured.match ops{["linalg.fill"]} in %arg1
-      : (!transform.any_op) -> !transform.any_op
-  %tiled_matmul_op, %forall_op = transform.structured.tile_using_forall %matmul_op num_threads [] tile_sizes [128, 128](mapping = [#gpu.block<y>, #gpu.block<x>])
-      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %fused_op, %new_containing_op = transform.structured.fuse_into_containing_op %fill_op into %forall_op
-      : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
-
-  // Tile linalg.matmul a second time.
-  %tiled_linalg_op, %loops = transform.structured.tile_using_for %tiled_matmul_op[0, 0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-
-  // Pad linalg.matmul.
-  %padded, %pad, %copy_back = transform.structured.pad %tiled_linalg_op
-      {padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-       padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 1],
-       copy_back_op = "linalg.copy"}
-      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
-
-  // Map and tile tensor.pad.
-  %pad_forall_op, %tiled_pad_op = transform.structured.gpu.map_copy_to_threads
-      %pad total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  transform.foreach %pad_forall_op : !transform.any_op {
-  ^bb2(%arg2 : !transform.any_op):
-    %if_op = transform.structured.match ops{["scf.if"]} in %arg2
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    // Fuse linalg.fill into linalg.matmul and tile.
+    %matmul_op = transform.structured.match ops{["linalg.matmul"]} in %arg1
+        : (!transform.any_op) -> !transform.any_op
+    %fill_op = transform.structured.match ops{["linalg.fill"]} in %arg1
+        : (!transform.any_op) -> !transform.any_op
+    %tiled_matmul_op, %forall_op = transform.structured.tile_using_forall %matmul_op num_threads [] tile_sizes [128, 128](mapping = [#gpu.block<y>, #gpu.block<x>])
+        : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %fused_op, %new_containing_op = transform.structured.fuse_into_containing_op %fill_op into %forall_op
+        : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+
+    // Tile linalg.matmul a second time.
+    %tiled_linalg_op, %loops = transform.structured.tile_using_for %tiled_matmul_op[0, 0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+
+    // Pad linalg.matmul.
+    %padded, %pad, %copy_back = transform.structured.pad %tiled_linalg_op
+        {padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+         padding_dimensions=[0, 1, 2], pack_paddings=[1, 1, 1],
+         copy_back_op = "linalg.copy"}
+        : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+
+    // Map and tile tensor.pad.
+    %pad_forall_op, %tiled_pad_op = transform.structured.gpu.map_copy_to_threads
+        %pad total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.foreach %pad_forall_op : !transform.any_op {
+    ^bb2(%arg2 : !transform.any_op):
+      %if_op = transform.structured.match ops{["scf.if"]} in %arg2
+          : (!transform.any_op) -> !transform.any_op
+      // TODO: The scf.if can be avoided with 0x... tensors.
+      transform.scf.take_assumed_branch %if_op take_else_branch
+          : (!transform.any_op) -> ()
+    }
+
+    // Apply masked vectorization to padding ops.
+    transform.structured.vectorize %tiled_pad_op vector_sizes [128, 4]
+        : !transform.any_op
+
+    // Assign shared memory buffer to padding.
+    %buffer, %new_ops = transform.structured.bufferize_to_allocation
+        %pad_forall_op {memory_space = 3, bufferize_destination_only, emit_dealloc}
+        : !transform.any_op
+
+    // Bufferize.
+    %func_op_1 = transform.structured.match ops{["func.func"]} in %arg1
+        : (!transform.any_op) -> !transform.any_op
+    transform.bufferization.eliminate_empty_tensors %func_op_1 : !transform.any_op
+    transform.apply_dce to %func_op_1 : !transform.any_op
+    transform.apply_cse to %func_op_1 : !transform.any_op
+    %bufferized = transform.bufferization.one_shot_bufferize
+        layout{IdentityLayoutMap} %arg1 {bufferize_function_boundaries=true}
         : (!transform.any_op) -> !transform.any_op
-    // TODO: The scf.if can be avoided with 0x... tensors.
-    transform.scf.take_assumed_branch %if_op take_else_branch
-        : (!transform.any_op) -> ()
-  }
 
-  // Apply masked vectorization to padding ops.
-  transform.structured.vectorize %tiled_pad_op vector_sizes [128, 4]
-      : !transform.any_op
-
-  // Assign shared memory buffer to padding.
-  %buffer, %new_ops = transform.structured.bufferize_to_allocation
-      %pad_forall_op {memory_space = 3, bufferize_destination_only, emit_dealloc}
-      : !transform.any_op
-
-  // Bufferize.
-  %func_op_1 = transform.structured.match ops{["func.func"]} in %arg1
-      : (!transform.any_op) -> !transform.any_op
-  transform.bufferization.eliminate_empty_tensors %func_op_1 : !transform.any_op
-  transform.apply_dce to %func_op_1 : !transform.any_op
-  transform.apply_cse to %func_op_1 : !transform.any_op
-  %bufferized = transform.bufferization.one_shot_bufferize
-      layout{IdentityLayoutMap} %arg1 {bufferize_function_boundaries=true}
-      : (!transform.any_op) -> !transform.any_op
-
-  // Apply vectorization to copy back from shared memory.
-  // TODO: Find a way to retain the handle to linalg.copy throughout
-  // bufferization.
-  %func_op_2 = transform.structured.match ops{["func.func"]} in %bufferized
-      : (!transform.any_op) -> !transform.any_op
-  %bufferized_copy_back = transform.structured.match ops{["linalg.copy"]} in %func_op_2
-      : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize
-      %bufferized_copy_back vector_sizes [128, 4] : !transform.any_op
-
-  // Canonicalize, cleanup and vector lowering. This step also removes buffer
-  // self-copies.
-  transform.apply_patterns to %func_op_2 {
-    transform.apply_patterns.canonicalization
-    transform.apply_patterns.vector.lower_masked_transfers
-  } {apply_cse} : !transform.any_op
+    // Apply vectorization to copy back from shared memory.
+    // TODO: Find a way to retain the handle to linalg.copy throughout
+    // bufferization.
+    %func_op_2 = transform.structured.match ops{["func.func"]} in %bufferized
+        : (!transform.any_op) -> !transform.any_op
+    %bufferized_copy_back = transform.structured.match ops{["linalg.copy"]} in %func_op_2
+        : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize
+        %bufferized_copy_back vector_sizes [128, 4] : !transform.any_op
+
+    // Canonicalization, cleanup, and vector lowering. This step also removes buffer
+    // self-copies.
+    transform.apply_patterns to %func_op_2 {
+      transform.apply_patterns.canonicalization
+      transform.apply_patterns.vector.lower_masked_transfers
+    } {apply_cse} : !transform.any_op
+    transform.yield
+  }
 }
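
Unlike the hoisting tests above, the entry argument in both of these scripts is
annotated {transform.consumed}: the script bufferizes %arg1 itself, which
invalidates that handle, and everything afterwards goes through the freshly
produced one. In sketch form (other steps elided):

  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
    %bufferized = transform.bufferization.one_shot_bufferize
        layout{IdentityLayoutMap} %arg1 {bufferize_function_boundaries=true}
        : (!transform.any_op) -> !transform.any_op
    // From here on, only %bufferized may be used, not %arg1.
    transform.yield
  }
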
diff --git a/mlir/test/Dialect/Linalg/multisize-tiling-full.mlir b/mlir/test/Dialect/Linalg/multisize-tiling-full.mlir
index 7101b62886336cf..592eb781cd4ff5c 100644
--- a/mlir/test/Dialect/Linalg/multisize-tiling-full.mlir
+++ b/mlir/test/Dialect/Linalg/multisize-tiling-full.mlir
@@ -1,20 +1,22 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter --scf-for-loop-canonicalization --canonicalize --split-input-file %s | FileCheck %s
-// RUN: mlir-opt --test-transform-dialect-interpreter --split-input-file %s | FileCheck %s --check-prefix=NOCANON
+// RUN: mlir-opt --transform-interpreter --scf-for-loop-canonicalization --canonicalize --split-input-file %s | FileCheck %s
+// RUN: mlir-opt --transform-interpreter --split-input-file %s | FileCheck %s --check-prefix=NOCANON
 
 // This implements a 2D multisize tiling with target sizes [3, 10].
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3} : (!transform.any_op) -> !transform.any_op
-  %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10} : (!transform.any_op) -> !transform.any_op
-  %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 } : !transform.any_op, !transform.any_op
-  %3:2 = transform.structured.tile_using_for %2#0 [%1#0] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %4:2 = transform.structured.tile_using_for %2#1 [%1#1] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %5 = merge_handles %3#0, %4#0 : !transform.any_op
-  %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op
-  %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 } : !transform.any_op, !transform.any_op
-  transform.structured.tile_using_for %6#0 [0, %tt#0] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
-  transform.structured.tile_using_for %6#1 [0, %tt#1] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3} : (!transform.any_op) -> !transform.any_op
+    %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10} : (!transform.any_op) -> !transform.any_op
+    %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 } : !transform.any_op, !transform.any_op
+    %3:2 = transform.structured.tile_using_for %2#0 [%1#0] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %4:2 = transform.structured.tile_using_for %2#1 [%1#1] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %5 = transform.merge_handles %3#0, %4#0 : !transform.any_op
+    %tt:3 = transform.replicate num(%5) %t#0, %t#1, %t#2 : !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op
+    %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 } : !transform.any_op, !transform.any_op
+    transform.structured.tile_using_for %6#0 [0, %tt#0] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.structured.tile_using_for %6#1 [0, %tt#1] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
@@ -102,19 +104,21 @@ func.func @two_d(%arg0: tensor<10x34xf32>,
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3} : (!transform.any_op) -> !transform.param<i64>
-  %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10} : (!transform.any_op) -> !transform.param<i64>
-  %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 } : !transform.any_op, !transform.param<i64>
-  %3:2 = transform.structured.tile_using_for %2#0 [%1#0] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
-  %4:2 = transform.structured.tile_using_for %2#1 [%1#1] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
-  %5 = merge_handles %3#0, %4#0 : !transform.any_op
-  %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !transform.any_op, !transform.param<i64>, !transform.param<i64>, !transform.param<i64>
-  %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 } : !transform.any_op, !transform.param<i64>
-  transform.structured.tile_using_for %6#0 [0, %tt#0] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
-  transform.structured.tile_using_for %6#1 [0, %tt#1] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3} : (!transform.any_op) -> !transform.param<i64>
+    %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10} : (!transform.any_op) -> !transform.param<i64>
+    %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 } : !transform.any_op, !transform.param<i64>
+    %3:2 = transform.structured.tile_using_for %2#0 [%1#0] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+    %4:2 = transform.structured.tile_using_for %2#1 [%1#1] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+    %5 = transform.merge_handles %3#0, %4#0 : !transform.any_op
+    %tt:3 = transform.replicate num(%5) %t#0, %t#1, %t#2 : !transform.any_op, !transform.param<i64>, !transform.param<i64>, !transform.param<i64>
+    %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 } : !transform.any_op, !transform.param<i64>
+    transform.structured.tile_using_for %6#0 [0, %tt#0] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+    transform.structured.tile_using_for %6#1 [0, %tt#1] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
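
Beyond the wrapping, this file also picks up a spelling change: the bare
merge_handles and replicate forms become fully qualified, e.g.

  %5 = transform.merge_handles %3#0, %4#0 : !transform.any_op
  %tt:3 = transform.replicate num(%5) %t#0, %t#1, %t#2
      : !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op

The unqualified spelling relied on the enclosing transform.sequence to supply
the default dialect; inside a named sequence in a plain builtin.module the
transform. prefix is spelled out.
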
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize-empty-tensor-elimination.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize-empty-tensor-elimination.mlir
index 939eea37e9b7983..0172760576efc51 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize-empty-tensor-elimination.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize-empty-tensor-elimination.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func.func @eliminate_tensor_empty(
 //  CHECK-SAME:     %[[arg0:.*]]: tensor<50x91xf32>,
@@ -32,11 +32,13 @@ func.func @eliminate_tensor_empty(
   return %3 : tensor<50x91xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.eliminate_empty_tensors %0 : !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.linalg.erase_unnecessary_inputs
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.eliminate_empty_tensors %0 : !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.linalg.erase_unnecessary_inputs
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir b/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir
index be807a9d5691733..f2e9e839b7c46b0 100644
--- a/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir
+++ b/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir
@@ -1,5 +1,5 @@
 
-// RUN: mlir-opt --test-transform-dialect-interpreter -cse -canonicalize -split-input-file -verify-diagnostics %s | FileCheck %s
+// RUN: mlir-opt --transform-interpreter -cse -canonicalize -split-input-file -verify-diagnostics %s | FileCheck %s
 
 #map = affine_map<()[s0] -> (-s0 + 12, 7)>
 
@@ -46,17 +46,19 @@ func.func @pad_to_memory_space(%arg0: tensor<24x12xf32>,
   func.return %5 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pack_paddings=[1, 1, 1]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
-  %buffer, %new_ops = transform.structured.bufferize_to_allocation %pad {memory_space = 3, emit_dealloc} : !transform.any_op
-  %2 = transform.bufferization.one_shot_bufferize %arg1 {bufferize_function_boundaries=true} : (!transform.any_op) -> !transform.any_op
-
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pack_paddings=[1, 1, 1]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %buffer, %new_ops = transform.structured.bufferize_to_allocation %pad {memory_space = 3, emit_dealloc} : !transform.any_op
+    %2 = transform.bufferization.one_shot_bufferize %arg1 {bufferize_function_boundaries=true} : (!transform.any_op) -> !transform.any_op
+
+    transform.yield
+  }
 }
 
 // -----
@@ -103,17 +105,19 @@ func.func @vectorize_and_bufferize_pad(%arg0: tensor<24x12xf32>,
   func.return %5 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pack_paddings=[1, 1, 1]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
-  transform.structured.vectorize %pad vector_sizes [10, 12] : !transform.any_op
-  %vector_write = transform.structured.match ops{["vector.transfer_write"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %mask_op = transform.get_parent_op %vector_write {op_name = "vector.mask"} : (!transform.any_op) -> !transform.any_op
-  %buffer, %new_ops = transform.structured.bufferize_to_allocation %mask_op {memory_space = 3, emit_dealloc} : !transform.any_op
-  %2 = transform.bufferization.one_shot_bufferize %arg1 {bufferize_function_boundaries=true} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pack_paddings=[1, 1, 1]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.structured.vectorize %pad vector_sizes [10, 12] : !transform.any_op
+    %vector_write = transform.structured.match ops{["vector.transfer_write"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %mask_op = transform.get_parent_op %vector_write {op_name = "vector.mask"} : (!transform.any_op) -> !transform.any_op
+    %buffer, %new_ops = transform.structured.bufferize_to_allocation %mask_op {memory_space = 3, emit_dealloc} : !transform.any_op
+    %2 = transform.bufferization.one_shot_bufferize %arg1 {bufferize_function_boundaries=true} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
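
The second test recovers a handle to the vectorized pad indirectly: masked
vectorization rewrites the tensor.pad into transfers wrapped in a vector.mask,
so the script matches the vector.transfer_write and walks up to its parent op:

  %vector_write = transform.structured.match ops{["vector.transfer_write"]} in %arg1 : (!transform.any_op) -> !transform.any_op
  %mask_op = transform.get_parent_op %vector_write {op_name = "vector.mask"} : (!transform.any_op) -> !transform.any_op
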
diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index 31b29c0e105d99d..4fa56b474f82efd 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
 #map1 = affine_map<(d0) -> (d0 + 2)>
 #map2 = affine_map<(d0) -> (d0 + 4)>
@@ -66,10 +66,12 @@ func.func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
 //   CHECK-NOT:         memref.dealloc %[[tmpB]] : memref<48xi8>
 //   CHECK-NOT:         memref.dealloc %[[tmpC]] : memref<24xi8>
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.promote %0 { use_alloca } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.promote %0 { use_alloca } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -136,10 +138,12 @@ func.func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
 //       CHECK:         memref.dealloc %[[tmpB_f64]] : memref<96xi8>
 //       CHECK:         memref.dealloc %[[tmpC_f64]] : memref<48xi8>
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.promote %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.promote %0 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -180,11 +184,13 @@ func.func @gemm_shared(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?
 // CHECK:         linalg.matmul ins(%[[shared_A]], %[[shared_B]]{{.*}} outs(%[[subview_C]]
 
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:3 = transform.structured.tile_using_for %0 [16, 16, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-  %2 = transform.structured.promote %1 { operands_to_promote = [0, 1], mapping = [#gpu.memory_space<workgroup>] } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.tile_using_for %0 [16, 16, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %2 = transform.structured.promote %1 { operands_to_promote = [0, 1], mapping = [#gpu.memory_space<workgroup>] } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 
@@ -222,11 +228,13 @@ func.func @gemm_private(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<
 // CHECK:         linalg.matmul ins(%[[private_A]], %[[private_B]]{{.*}} outs(%[[subview_C]]
 
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:3 = transform.structured.tile_using_for %0 [16, 16, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-  %2 = transform.structured.promote %1 { operands_to_promote = [0, 1], mapping = [#gpu.memory_space<private>] } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.tile_using_for %0 [16, 16, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %2 = transform.structured.promote %1 { operands_to_promote = [0, 1], mapping = [#gpu.memory_space<private>] } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 
@@ -270,10 +278,12 @@ func.func @promote_rank_reducing_subviews(%arg0:  memref<?x?x?x64xf32, strided<[
   return
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.promote %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.promote %0 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -377,8 +387,10 @@ func.func @linalg_generic_update_all_function_inputs_outputs(%arg0: memref<3x4xf
 }
 
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.promote %0 { memory_space = #gpu.address_space<workgroup> } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.promote %0 { memory_space = #gpu.address_space<workgroup> } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
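
Taken together, the tests in this file cover the main knobs of
transform.structured.promote; as they appear above, for instance:

  // Promote into memref.alloca instead of memref.alloc.
  %1 = transform.structured.promote %0 { use_alloca } : (!transform.any_op) -> !transform.any_op

  // Promote only the two input operands, into GPU workgroup memory.
  %2 = transform.structured.promote %1
      { operands_to_promote = [0, 1], mapping = [#gpu.memory_space<workgroup>] }
      : (!transform.any_op) -> !transform.any_op
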
diff --git a/mlir/test/Dialect/Linalg/promotion_options.mlir b/mlir/test/Dialect/Linalg/promotion_options.mlir
index a6daa9af2f37cec..760336ff34f8406 100644
--- a/mlir/test/Dialect/Linalg/promotion_options.mlir
+++ b/mlir/test/Dialect/Linalg/promotion_options.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -canonicalize -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -canonicalize -split-input-file | FileCheck %s
 
 func.func @gemm(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 {
@@ -34,9 +34,11 @@ func.func @gemm(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>
 //      CHECK:       memref.dealloc %[[tmpA]]
 //      CHECK:       memref.dealloc %[[tmpC]]
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:3 = transform.structured.tile_using_for %0 [16, 16, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-  %2 = transform.structured.promote %1 { operands_to_promote = [0, 2], force_full_tiles = [false, false], use_full_tiles_by_default } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.tile_using_for %0 [16, 16, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %2 = transform.structured.promote %1 { operands_to_promote = [0, 2], force_full_tiles = [false, false], use_full_tiles_by_default } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/tile-conv.mlir b/mlir/test/Dialect/Linalg/tile-conv.mlir
index 859468fae537978..4a940f12662e6c6 100644
--- a/mlir/test/Dialect/Linalg/tile-conv.mlir
+++ b/mlir/test/Dialect/Linalg/tile-conv.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -canonicalize | FileCheck %s
 
 //  CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 2)>
 //  CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 3)>
@@ -9,10 +9,12 @@ func.func @conv(%arg0 : memref<?x?xf32>, %arg1 : memref<?x?xf32>, %arg2 : memref
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.conv_2d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loop:2 = transform.structured.tile_using_for %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 //       CHECK: func @conv
diff --git a/mlir/test/Dialect/Linalg/tile-indexed.mlir b/mlir/test/Dialect/Linalg/tile-indexed.mlir
index 1e97789bed3f1bd..c176dc19c7e9e53 100644
--- a/mlir/test/Dialect/Linalg/tile-indexed.mlir
+++ b/mlir/test/Dialect/Linalg/tile-indexed.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -canonicalize -split-input-file | FileCheck %s -check-prefix=TILE-10n25
+// RUN: mlir-opt %s -transform-interpreter -canonicalize -split-input-file | FileCheck %s -check-prefix=TILE-10n25
 
 func.func @indexed_vector(%arg0: memref<50xindex>) {
   linalg.generic {indexing_maps = [affine_map<(i) -> (i)>],
@@ -11,10 +11,12 @@ func.func @indexed_vector(%arg0: memref<50xindex>) {
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loop = transform.structured.tile_using_for %0 [10] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loop = transform.structured.tile_using_for %0 [10] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // TILE-10n25-DAG: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)>
@@ -41,10 +43,12 @@ func.func @indexed_matrix(%arg0: memref<50x50xindex>) {
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loop:2 = transform.structured.tile_using_for %0 [10, 25] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loop:2 = transform.structured.tile_using_for %0 [10, 25] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // TILE-10n25-DAG: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)>
diff --git a/mlir/test/Dialect/Linalg/tile-softmax.mlir b/mlir/test/Dialect/Linalg/tile-softmax.mlir
index c158bfd47010eb4..ec848e2deb74e25 100644
--- a/mlir/test/Dialect/Linalg/tile-softmax.mlir
+++ b/mlir/test/Dialect/Linalg/tile-softmax.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -canonicalize --split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -canonicalize --split-input-file | FileCheck %s
 
 // Check that we can tile softmax on tensors.
 // The tiling here is 2x3.
@@ -36,10 +36,12 @@ func.func @softmax(%arg0: tensor<16x64x256xf32>) -> tensor<16x64x256xf32> {
   return %1 : tensor<16x64x256xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.softmax"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loop:2 = transform.structured.tile_using_for %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -100,18 +102,20 @@ func.func @softmax_tile_n_fuse(%arg0: tensor<16x64x256xf32>) -> tensor<16x64x256
   return %1 : tensor<16x64x256xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.softmax"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.softmax"]} in %arg1 : (!transform.any_op) -> !transform.any_op
 
-  // Tile the root.
-  %tiled_op, %forall_op = transform.structured.tile_using_forall %0 num_threads [8, 16]
-       : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    // Tile the root.
+    %tiled_op, %forall_op = transform.structured.tile_using_forall %0 num_threads [8, 16]
+         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  // Fuse all producers.
-  %1 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.fuse_into_containing_op %1 into %forall_op
-    : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+    // Fuse all producers.
+    %1 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.fuse_into_containing_op %1 into %forall_op
+      : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 // -----
 
@@ -142,8 +146,10 @@ func.func @softmax_memref(%arg0: memref<16x64x256xf32>, %arg1: memref<16x64x256x
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.softmax"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loop:2 = transform.structured.tile_using_for %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/tile-tensors.mlir b/mlir/test/Dialect/Linalg/tile-tensors.mlir
index 5d6bab058c3e5f8..e0429b1f873298b 100644
--- a/mlir/test/Dialect/Linalg/tile-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-tensors.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @matmul_tensors(
 // CHECK-SAME:    %[[TA:[0-9a-z]+]]: tensor<?x?xf32>
@@ -27,10 +27,12 @@ func.func @matmul_tensors(
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:3 = transform.structured.tile_using_for %0 [2, 3, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -58,10 +60,12 @@ func.func @generic_op_tensors(
   return %4 : tensor<?x?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:3 = transform.structured.tile_using_for %0 [2, 3, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @generic_op_tensors
@@ -129,8 +133,10 @@ func.func @fold_extract_slice(
   return %2 : tensor<?x42xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:3 = transform.structured.tile_using_for %0 [2, 3, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/tile-to-forall.mlir b/mlir/test/Dialect/Linalg/tile-to-forall.mlir
index ff5028023b546b8..38742028e481012 100644
--- a/mlir/test/Dialect/Linalg/tile-to-forall.mlir
+++ b/mlir/test/Dialect/Linalg/tile-to-forall.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter -canonicalize -cse -split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter -canonicalize -cse -split-input-file | FileCheck %s
 
 // Offset per thread:
 // CHECK-DAG: affine_map<(d0)[s0] -> (d0 * (s0 ceildiv 10))>
@@ -30,11 +30,13 @@ module {
     return %0 : tensor<?x?xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1:2 = transform.structured.tile_using_forall %0 num_threads [10, 20] (mapping = [ #gpu.thread<y>, #gpu.thread<x> ] )
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1:2 = transform.structured.tile_using_forall %0 num_threads [10, 20] (mapping = [ #gpu.thread<y>, #gpu.thread<x> ] )
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -54,11 +56,13 @@ module {
     return
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1:2 = transform.structured.tile_using_forall %0 num_threads [10, 20] (mapping = [ #gpu.thread<y>, #gpu.thread<x> ] )
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1:2 = transform.structured.tile_using_forall %0 num_threads [10, 20] (mapping = [ #gpu.thread<y>, #gpu.thread<x> ] )
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -77,11 +81,13 @@ module {
     return
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1:2 = transform.structured.tile_using_forall %0 num_threads [10, 20] (mapping = [ #gpu.thread<y>, #gpu.thread<x> ] )
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1:2 = transform.structured.tile_using_forall %0 num_threads [10, 20] (mapping = [ #gpu.thread<y>, #gpu.thread<x> ] )
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -120,12 +126,14 @@ func.func @matmul_tile_size_dynamic_dynamic(%A: tensor<?x?xf32>, %B: tensor<?x?x
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %sz = transform.structured.match ops{["test.dummy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.tile_using_forall %0 tile_sizes *(%sz : !transform.any_op)
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %sz = transform.structured.match ops{["test.dummy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.tile_using_forall %0 tile_sizes *(%sz : !transform.any_op)
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -160,11 +168,13 @@ func.func @matmul_static(%A: tensor<100x200xf32>, %B: tensor<200x300xf32>, %C: t
   return %0 : tensor<100x300xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.tile_using_forall %0 num_threads [10, 21]
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.tile_using_forall %0 num_threads [10, 21]
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 
@@ -202,11 +212,13 @@ func.func @matmul_tile_size_dynamic(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.tile_using_forall %0 tile_sizes [10, 20]
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.tile_using_forall %0 tile_sizes [10, 20]
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -239,11 +251,13 @@ func.func @matmul_tile_size_static(%A: tensor<100x200xf32>, %B: tensor<200x300xf
   return %0 : tensor<100x300xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.tile_using_forall %0 tile_sizes [10, 21]
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.tile_using_forall %0 tile_sizes [10, 21]
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -262,11 +276,13 @@ module {
     return %result : tensor<4xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1:2 = transform.structured.tile_using_forall %0 num_threads [2] ( mapping = [#gpu.thread<x>])
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1:2 = transform.structured.tile_using_forall %0 num_threads [2] ( mapping = [#gpu.thread<x>])
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 // CHECK-DAG: #[[$map0:.+]] = affine_map<(d0) -> (d0 * 2)>
@@ -313,12 +329,14 @@ func.func @matmul_tile_size_dynamic_dynamic(%A: tensor<?x?xf32>, %B: tensor<?x?x
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %sz = transform.structured.match ops{["test.dummy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.tile_using_forall %0 tile_sizes [%sz : !transform.any_op, 20]
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %sz = transform.structured.match ops{["test.dummy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.tile_using_forall %0 tile_sizes [%sz : !transform.any_op, 20]
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -368,11 +386,13 @@ transform.sequence failures(propagate) {
     return %res1, %res2 : tensor<100xf32>, tensor<100xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %forall, %tiled_generic = transform.structured.tile_using_forall %0 num_threads [7]
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %forall, %tiled_generic = transform.structured.tile_using_forall %0 num_threads [7]
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 
 // -----
@@ -422,10 +442,12 @@ transform.sequence failures(propagate) {
     return %res2, %res3 : tensor<300x100xf32>, tensor<300xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%IN_MAT2: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %IN_MAT2 : (!transform.any_op) -> !transform.any_op
-    %forall, %tiled_generic = transform.structured.tile_using_forall %0 num_threads [4]
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%IN_MAT2: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %IN_MAT2 : (!transform.any_op) -> !transform.any_op
+      %forall, %tiled_generic = transform.structured.tile_using_forall %0 num_threads [4]
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 
diff --git a/mlir/test/Dialect/Linalg/transform-lower-pack.mlir b/mlir/test/Dialect/Linalg/transform-lower-pack.mlir
index ad6c6a6f6199cc6..435ae1a1ae0641c 100644
--- a/mlir/test/Dialect/Linalg/transform-lower-pack.mlir
+++ b/mlir/test/Dialect/Linalg/transform-lower-pack.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -cse --split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -cse --split-input-file | FileCheck %s
 
   // CHECK-LABEL: func.func @pack(
 func.func @pack(%arg0: tensor<129x47x16x16xf32>, %arg1: tensor<17x2x16x16x32x8xf32>) -> tensor<17x2x16x16x32x8xf32> {
@@ -18,12 +18,14 @@ func.func @pack(%arg0: tensor<129x47x16x16xf32>, %arg1: tensor<17x2x16x16x32x8xf
   return %pack : tensor<17x2x16x16x32x8xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -47,12 +49,14 @@ func.func @pack(%arg0: tensor<128x8xf32>, %arg1: tensor<8x8x16x1xf32>) -> tensor
   return %pack : tensor<8x8x16x1xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -79,12 +83,14 @@ func.func @pack_as_pad(%arg0: tensor<129x47x16x16xf32>, %arg1: tensor<1x1x1x1x13
   return %pack :  tensor<1x1x1x1x136x64x16x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -111,12 +117,14 @@ func.func @pack_not_a_pad(%arg0: tensor<129x47x16x16xf32>, %arg1: tensor<1x1x16x
   return %pack :  tensor<1x1x16x16x136x64xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -140,15 +148,17 @@ func.func @unpack(%arg0: tensor<17x2x16x16x32x8xf32>, %arg1: tensor<129x47x16x16
   return %pack : tensor<129x47x16x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %unpack = transform.structured.match ops{["tensor.unpack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.unpack">
-  transform.structured.lower_unpack %unpack : (!transform.op<"tensor.unpack">)
-    -> (!transform.op<"tensor.empty">,
-        !transform.op<"linalg.transpose">,
-        !transform.op<"tensor.collapse_shape">,
-        !transform.op<"tensor.extract_slice">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %unpack = transform.structured.match ops{["tensor.unpack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.unpack">
+    transform.structured.lower_unpack %unpack : (!transform.op<"tensor.unpack">)
+      -> (!transform.op<"tensor.empty">,
+          !transform.op<"linalg.transpose">,
+          !transform.op<"tensor.collapse_shape">,
+          !transform.op<"tensor.extract_slice">)
+    transform.yield
+  }
 }
 
 // -----
@@ -171,15 +181,17 @@ func.func @unpack_as_pad(%arg0: tensor<1x1x1x1x136x64x16x16xf32>, %arg1: tensor<
   return %pack : tensor<129x47x16x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %unpack = transform.structured.match ops{["tensor.unpack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.unpack">
-  transform.structured.lower_unpack %unpack : (!transform.op<"tensor.unpack">)
-    -> (!transform.op<"tensor.empty">,
-        !transform.op<"linalg.transpose">,
-        !transform.op<"tensor.collapse_shape">,
-        !transform.op<"tensor.extract_slice">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %unpack = transform.structured.match ops{["tensor.unpack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.unpack">
+    transform.structured.lower_unpack %unpack : (!transform.op<"tensor.unpack">)
+      -> (!transform.op<"tensor.empty">,
+          !transform.op<"linalg.transpose">,
+          !transform.op<"tensor.collapse_shape">,
+          !transform.op<"tensor.extract_slice">)
+    transform.yield
+  }
 }
 
 // -----
@@ -204,12 +216,14 @@ func.func @pack_with_outer_dims_perm(%src: tensor<100x200x128x256xi32>,
   return %0 : tensor<200x4x16x100x16x32xi32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -234,12 +248,14 @@ func.func @pack_with_pad(%src: tensor<4225x12xf32>, %dest: tensor<265x16x16x1xf3
   return %0 : tensor<265x16x16x1xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -266,12 +282,14 @@ func.func @pack_with_pad_and_outer_dims_perm(%src: tensor<100x200x127x255xi32>,
   return %0 : tensor<200x4x16x100x16x32xi32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -320,12 +338,14 @@ func.func @dynamic_pack_pad_transpose_inner_and_outer_dims(%source: tensor<?x?xf
   return %pack : tensor<?x?x16x32xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -356,12 +376,14 @@ func.func @pack_as_pad_with_outer_dims_perm(%arg0: tensor<129x47x16x16xf32>, %ar
   return %pack :  tensor<1x1x1x1x136x64x16x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -390,12 +412,14 @@ func.func @pack_as_pad_with_unit_dims(%arg0: tensor<3x1x1x1xf32>, %arg1: tensor<
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.pack">
-  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
-    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %pack = transform.structured.match ops{["tensor.pack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.pack">
+    transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+      -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+    transform.yield
+  }
 }
 
 // -----
@@ -424,13 +448,15 @@ func.func @unpack_with_dynamic_dest(%arg0: tensor<32x2x49x16x16xf32>, %arg1: ten
   return %pack : tensor<32x?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %unpack = transform.structured.match ops{["tensor.unpack"]} in %module_op
-    : (!transform.any_op) -> !transform.op<"tensor.unpack">
-  transform.structured.lower_unpack %unpack : (!transform.op<"tensor.unpack">)
-    -> (!transform.op<"tensor.empty">,
-        !transform.op<"linalg.transpose">,
-        !transform.op<"tensor.collapse_shape">,
-        !transform.op<"tensor.extract_slice">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %unpack = transform.structured.match ops{["tensor.unpack"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"tensor.unpack">
+    transform.structured.lower_unpack %unpack : (!transform.op<"tensor.unpack">)
+      -> (!transform.op<"tensor.empty">,
+          !transform.op<"linalg.transpose">,
+          !transform.op<"tensor.collapse_shape">,
+          !transform.op<"tensor.extract_slice">)
+    transform.yield
+  }
 }
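
These lower_pack/lower_unpack tests keep the typed-handle idiom unchanged: the match result is cast to !transform.op<"tensor.pack">, so the decomposition results come back already constrained to the expected op kinds instead of plain !transform.any_op. The recurring pattern, in isolation with %root as the entry-point argument:

  %pack = transform.structured.match ops{["tensor.pack"]} in %root
    : (!transform.any_op) -> !transform.op<"tensor.pack">
  transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
    -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
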
diff --git a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir
index 7dee14f22df5d08..73c5e28d1200e4c 100644
--- a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics \
-// RUN:   -test-transform-dialect-interpreter -canonicalize \
+// RUN:   -transform-interpreter -canonicalize \
 // RUN:   -allow-unregistered-dialect -split-input-file %s | FileCheck %s
 
 // CHECK:       #[[$map:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 5)>
@@ -29,20 +29,22 @@ func.func @tensor_pad_constant(%t: tensor<?x10xindex>, %l2: index, %h1: index,
   return %0 : tensor<?x?xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {emit_dealloc} : !transform.any_op
-
-  // Ensure that one linalg.fill was generated.
-  %fill_op = transform.select "linalg.fill" in %new : (!transform.any_op) -> !transform.any_op
-  // expected-remark @below{{1}}
-  test_print_number_of_associated_payload_ir_ops %fill_op : !transform.any_op
-
-  // Ensure that one linalg.copy was generated.
-  %tensor_store = transform.select "memref.tensor_store" in %new : (!transform.any_op) -> !transform.any_op
-  // expected-remark @below{{1}}
-  test_print_number_of_associated_payload_ir_ops %tensor_store : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %new = transform.structured.bufferize_to_allocation %0 {emit_dealloc} : !transform.any_op
+
+    // Ensure that one linalg.fill was generated.
+    %fill_op = transform.select "linalg.fill" in %new : (!transform.any_op) -> !transform.any_op
+    // expected-remark @below{{1}}
+    transform.test_print_number_of_associated_payload_ir_ops %fill_op : !transform.any_op
+
+    // Ensure that one memref.tensor_store was generated.
+    %tensor_store = transform.select "memref.tensor_store" in %new : (!transform.any_op) -> !transform.any_op
+    // expected-remark @below{{1}}
+    transform.test_print_number_of_associated_payload_ir_ops %tensor_store : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -64,28 +66,30 @@ func.func @tensor_pad_constant_with_custom_copy(
   return %0 : tensor<?x?xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 3, alloc_op = "memref.alloca", memcpy_op = "linalg.copy", emit_dealloc}: !transform.any_op
-
-  // Ensure that one linalg.fill was generated.
-  %fill_op = transform.select "linalg.fill" in %new : (!transform.any_op) -> !transform.any_op
-  // expected-remark @below{{1}}
-  test_print_number_of_associated_payload_ir_ops %fill_op : !transform.any_op
-
-  // Ensure that one linalg.copy was generated.
-  %linalg_copy = transform.select "linalg.copy" in %new : (!transform.any_op) -> !transform.any_op
-  // expected-remark @below{{1}}
-  test_print_number_of_associated_payload_ir_ops %linalg_copy : !transform.any_op
-
-  // Ensure that one memref.alloca was generated.
-  %alloca = transform.select "memref.alloca" in %new : (!transform.any_op) -> !transform.any_op
-  // expected-remark @below{{1}}
-  test_print_number_of_associated_payload_ir_ops %alloca : !transform.any_op
-
-  // Make sure that One-Shot Bufferize can bufferize the rest.
-  %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 3, alloc_op = "memref.alloca", memcpy_op = "linalg.copy", emit_dealloc}: !transform.any_op
+
+    // Ensure that one linalg.fill was generated.
+    %fill_op = transform.select "linalg.fill" in %new : (!transform.any_op) -> !transform.any_op
+    // expected-remark @below{{1}}
+    transform.test_print_number_of_associated_payload_ir_ops %fill_op : !transform.any_op
+
+    // Ensure that one linalg.copy was generated.
+    %linalg_copy = transform.select "linalg.copy" in %new : (!transform.any_op) -> !transform.any_op
+    // expected-remark @below{{1}}
+    transform.test_print_number_of_associated_payload_ir_ops %linalg_copy : !transform.any_op
+
+    // Ensure that one memref.alloca was generated.
+    %alloca = transform.select "memref.alloca" in %new : (!transform.any_op) -> !transform.any_op
+    // expected-remark @below{{1}}
+    transform.test_print_number_of_associated_payload_ir_ops %alloca : !transform.any_op
+
+    // Make sure that One-Shot Bufferize can bufferize the rest.
+    %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -107,12 +111,14 @@ func.func @tensor_pad_constant(%t: tensor<?x10xindex>, %l2: index, %h1: index,
   return %0 : tensor<?x?xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {emit_dealloc} : !transform.any_op
-  // Make sure that One-Shot Bufferize can bufferize the rest.
-  %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %new = transform.structured.bufferize_to_allocation %0 {emit_dealloc} : !transform.any_op
+    // Make sure that One-Shot Bufferize can bufferize the rest.
+    %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -131,12 +137,14 @@ func.func @tensor_insert(%t: tensor<?x10xindex>, %idx: index, %v: index) -> tens
   return %r : tensor<?x10xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, emit_dealloc} : !transform.any_op
-  // Make sure that One-Shot Bufferize can bufferize the rest.
-  %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, emit_dealloc} : !transform.any_op
+    // Make sure that One-Shot Bufferize can bufferize the rest.
+    %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -154,12 +162,14 @@ func.func @tensor_insert_into_empty(%idx: index, %v: index) -> tensor<10xindex>
   return %r : tensor<10xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, emit_dealloc} : !transform.any_op
-  // Make sure that One-Shot Bufferize can bufferize the rest.
-  %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, emit_dealloc} : !transform.any_op
+    // Make sure that One-Shot Bufferize can bufferize the rest.
+    %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -170,11 +180,13 @@ func.func @tensor_extract(%t: tensor<?x10xindex>, %idx: index) -> index {
   return %r : index
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.extract"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below{{failed to bufferize operation}}
-  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, emit_dealloc} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.extract"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below{{failed to bufferize operation}}
+    %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, emit_dealloc} : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -192,10 +204,12 @@ func.func @vector_mask(%t: tensor<?xf32>, %val: vector<16xf32>, %idx: index, %m0
   return %r : tensor<?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["vector.mask"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, emit_dealloc} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["vector.mask"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, emit_dealloc} : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -213,10 +227,12 @@ func.func @tensor_insert_destination(%t: tensor<?x10xindex>, %idx: index, %v: in
   return %r : tensor<?x10xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, bufferize_destination_only, emit_dealloc} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, bufferize_destination_only, emit_dealloc} : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -237,10 +253,12 @@ func.func @scf_for_destination(%t: tensor<?x10xindex>, %lb: index, %ub: index, %
   return %r : tensor<?x10xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["scf.for"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, bufferize_destination_only, emit_dealloc} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["scf.for"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, bufferize_destination_only, emit_dealloc} : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -252,8 +270,10 @@ func.func @tensor_insert_destination_no_dealloc(%t: tensor<?x10xindex>, %idx: in
   return %r : tensor<?x10xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, bufferize_destination_only} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, bufferize_destination_only} : !transform.any_op
+    transform.yield
+  }
 }
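
One detail worth calling out in the hunks above: scripts that only inspect or locally rewrite the payload keep {transform.readonly} on the entry-point argument, while the ones that finish by one-shot-bufferizing %arg1 switch to {transform.consumed}, because that bufferization consumes the incoming handle. A minimal sketch of the consumed form, reusing the tensor.insert case from the tests above:

  module attributes {transform.with_named_sequence} {
    transform.named_sequence @__transform_main(%root: !transform.any_op {transform.consumed}) {
      %0 = transform.structured.match ops{["tensor.insert"]} in %root
        : (!transform.any_op) -> !transform.any_op
      %2, %new = transform.structured.bufferize_to_allocation %0 {memory_space = 4, emit_dealloc}
        : !transform.any_op
      // one_shot_bufferize consumes %root, hence {transform.consumed} above.
      %4 = transform.bufferization.one_shot_bufferize %root
        : (!transform.any_op) -> !transform.any_op
      transform.yield
    }
  }
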
diff --git a/mlir/test/Dialect/Linalg/transform-op-compose-masked-vectorize-and-cleanups.mlir b/mlir/test/Dialect/Linalg/transform-op-compose-masked-vectorize-and-cleanups.mlir
index ee79a188345080f..477261882421c95 100644
--- a/mlir/test/Dialect/Linalg/transform-op-compose-masked-vectorize-and-cleanups.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-compose-masked-vectorize-and-cleanups.mlir
@@ -1,39 +1,41 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
 //      CHECK-LABEL: masked_matmul
 func.func @masked_matmul(%module: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
 
   //      CHECK: %[[MLHS:.*]] = vector.create_mask {{.*}} : vector<8x8xi1>
-  //      CHECK: %[[LHS:.*]] = vector.transfer_read %{{.*}}, %[[MLHS]] {in_bounds = [true, true]} : memref<?x?xf32, strided<[?, 1], offset: ?>>, vector<8x8xf32> 
-  //      CHECK: %[[MRHS:.*]] = vector.create_mask {{.*}} : vector<8x8xi1> 
+  //      CHECK: %[[LHS:.*]] = vector.transfer_read %{{.*}}, %[[MLHS]] {in_bounds = [true, true]} : memref<?x?xf32, strided<[?, 1], offset: ?>>, vector<8x8xf32>
+  //      CHECK: %[[MRHS:.*]] = vector.create_mask {{.*}} : vector<8x8xi1>
   //      CHECK: %[[RHS:.*]] = vector.transfer_read %{{.*}}, %[[MRHS]] {in_bounds = [true, true]} : memref<?x?xf32, strided<[?, 1], offset: ?>>, vector<8x8xf32>
   //      CHECK: %[[MACC:.*]] = vector.create_mask {{.*}} : vector<8x8xi1>
-  //      CHECK: %[[ACC:.*]] = vector.transfer_read {{.*}}, %[[MACC]] {in_bounds = [true, true]} : memref<?x?xf32, strided<[?, 1], offset: ?>>, vector<8x8xf32> 
+  //      CHECK: %[[ACC:.*]] = vector.transfer_read {{.*}}, %[[MACC]] {in_bounds = [true, true]} : memref<?x?xf32, strided<[?, 1], offset: ?>>, vector<8x8xf32>
   //      CHECK: %[[MRES:.*]] = vector.create_mask {{.*}} : vector<8x8x8xi1>
   //      CHECK: %[[RES:.*]] = vector.mask %[[MRES]] { vector.contract
   // CHECK-SAME:   : vector<8x8xf32>, vector<8x8xf32> into vector<8x8xf32>
   // CHECK-SAME:   : vector<8x8x8xi1> -> vector<8x8xf32>
-  //      CHECK: vector.transfer_write %[[RES]], %{{.*}}, %[[MACC]] {in_bounds = [true, true]} : vector<8x8xf32>, memref<?x?xf32, strided<[?, 1], offset: ?>> 
+  //      CHECK: vector.transfer_write %[[RES]], %{{.*}}, %[[MACC]] {in_bounds = [true, true]} : vector<8x8xf32>, memref<?x?xf32, strided<[?, 1], offset: ?>>
   linalg.matmul ins(%module, %arg1 : memref<?x?xf32>, memref<?x?xf32>) outs(%arg2 : memref<?x?xf32>)
   return
 }
 
-transform.sequence  failures(propagate) {
-^bb0(%module: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %module 
-    : (!transform.any_op) -> !transform.any_op
-  %tiled_linalg_op, %loops:3 = transform.structured.tile_using_for %0[64, 128, 256] 
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-  %tiled_linalg_op_0, %loops_1:3 = transform.structured.tile_using_for %tiled_linalg_op[8, 8, 8] 
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-  transform.structured.vectorize %tiled_linalg_op_0 vector_sizes [8, 8, 8]
-    : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %module
+      : (!transform.any_op) -> !transform.any_op
+    %tiled_linalg_op, %loops:3 = transform.structured.tile_using_for %0[64, 128, 256]
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %tiled_linalg_op_0, %loops_1:3 = transform.structured.tile_using_for %tiled_linalg_op[8, 8, 8]
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.structured.vectorize %tiled_linalg_op_0 vector_sizes [8, 8, 8]
+      : !transform.any_op
 
-  %func = transform.structured.match ops{["func.func"]} in %module 
-    : (!transform.any_op) -> !transform.any_op
-  apply_patterns to %func {
-    transform.apply_patterns.vector.lower_masked_transfers
-    transform.apply_patterns.vector.transfer_permutation_patterns
-    transform.apply_patterns.vector.reduction_to_contract
-  } : !transform.any_op
+    %func = transform.structured.match ops{["func.func"]} in %module
+      : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %func {
+      transform.apply_patterns.vector.lower_masked_transfers
+      transform.apply_patterns.vector.transfer_permutation_patterns
+      transform.apply_patterns.vector.reduction_to_contract
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-decompose.mlir b/mlir/test/Dialect/Linalg/transform-op-decompose.mlir
index e02b029b2fdc993..f057a70d1396448 100644
--- a/mlir/test/Dialect/Linalg/transform-op-decompose.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-decompose.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter --split-input-file %s | FileCheck %s
+// RUN: mlir-opt --transform-interpreter --split-input-file %s | FileCheck %s
 
 // CHECK-DAG:  #[[$MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-DAG:  #[[$MAP1:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)>
@@ -243,11 +243,13 @@ func.func @softmax(%arg0: tensor<2x16x32xf32>, %dst: tensor<2x16x32xf32>) -> ten
 // CHECK:        } -> tensor<2x16x32xf32>
 // CHECK:        return %[[D7]] : tensor<2x16x32xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.decompose %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.decompose %0 : (!transform.any_op) -> !transform.any_op
 
-  %2 = transform.structured.match ops{["linalg.softmax"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %3 = transform.structured.decompose_interface %2 : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.match ops{["linalg.softmax"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %3 = transform.structured.decompose_interface %2 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
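
The decompose test above also exercises the interface form of the matcher: interface{LinalgOp} selects every op implementing the LinalgOp interface rather than naming concrete ops, so one handle can feed the decomposition regardless of the specific linalg op. In isolation:

  // Match by interface rather than by op name.
  %all_linalg = transform.structured.match interface{LinalgOp} in %root
    : (!transform.any_op) -> !transform.any_op
  %decomposed = transform.structured.decompose %all_linalg
    : (!transform.any_op) -> !transform.any_op
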
diff --git a/mlir/test/Dialect/Linalg/transform-op-fuse-into-containing.mlir b/mlir/test/Dialect/Linalg/transform-op-fuse-into-containing.mlir
index 02fb76445b8071b..bcaa3b893a6ba4b 100644
--- a/mlir/test/Dialect/Linalg/transform-op-fuse-into-containing.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-fuse-into-containing.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter --split-input-file %s -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt --transform-interpreter --split-input-file %s -verify-diagnostics | FileCheck %s
 
 #map0 = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
 #map1 = affine_map<(d0)[s0] -> (d0 * s0)>
@@ -41,14 +41,16 @@ module {
   func.func @dummy2() { return }
   func.func @dummy3() { return }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.fill">
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.fill">
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
 
-    // linalg.fill is tileable. The op is tiled and fused.
-    transform.structured.fuse_into_containing_op %0 into %1
-      : (!transform.op<"linalg.fill">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      // linalg.fill is tileable. The op is tiled and fused.
+      transform.structured.fuse_into_containing_op %0 into %1
+        : (!transform.op<"linalg.fill">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -85,14 +87,16 @@ module {
     func.return %2 : tensor<64xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.empty"]} in %arg1 : (!transform.any_op) -> !transform.op<"tensor.empty">
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["tensor.empty"]} in %arg1 : (!transform.any_op) -> !transform.op<"tensor.empty">
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
 
-    // tensor.empty is not tileable. The op is cloned and fused.
-    transform.structured.fuse_into_containing_op %0 into %1
-      : (!transform.op<"tensor.empty">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      // tensor.empty is not tileable. The op is cloned and fused.
+      transform.structured.fuse_into_containing_op %0 into %1
+        : (!transform.op<"tensor.empty">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -132,14 +136,16 @@ module {
     func.return %2 : tensor<?xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.fill">
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.fill">
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
 
-    // linalg.fill is tileable. The op is tiled and fused.
-    transform.structured.fuse_into_containing_op %0 into %1
-      : (!transform.op<"linalg.fill">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      // linalg.fill is tileable. The op is tiled and fused.
+      transform.structured.fuse_into_containing_op %0 into %1
+        : (!transform.op<"linalg.fill">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -181,14 +187,16 @@ module {
     func.return %2 : tensor<?xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.any_op
 
-    // linalg.fill is tileable. The op is tiled and fused.
-    transform.structured.fuse_into_containing_op %0 into %1
-      : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+      // linalg.fill is tileable. The op is tiled and fused.
+      transform.structured.fuse_into_containing_op %0 into %1
+        : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -242,14 +250,16 @@ module {
     func.return %2 : tensor<?xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
 
-    // linalg.generic is tileable. The op is tiled and fused.
-    transform.structured.fuse_into_containing_op %0 into %1
-      : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      // linalg.generic is tileable. The op is tiled and fused.
+      transform.structured.fuse_into_containing_op %0 into %1
+        : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -276,16 +286,18 @@ module {
     return %1 : tensor<2xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.any_op
 
-    // Create a new handle that points to `linalg.fill` twice.
-    %2 = transform.merge_handles %0, %0 : !transform.any_op
+      // Create a new handle that points to `linalg.fill` twice.
+      %2 = transform.merge_handles %0, %0 : !transform.any_op
 
-    // It shouldn't be a problem to fuse this handle.
-    transform.structured.fuse_into_containing_op %2 into %1 : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+      // It shouldn't be a problem to fuse this handle.
+      transform.structured.fuse_into_containing_op %2 into %1 : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -345,15 +357,17 @@ module {
     // CHECK: }
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
 
-    // linalg.generic is tileable. The op is tiled and fused.
-    %fused, %containing = transform.structured.fuse_into_containing_op %0 into %1
-      : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
-    test_print_remark_at_operand %containing, "new containing op" : !transform.any_op
+      // linalg.generic is tileable. The op is tiled and fused.
+      %fused, %containing = transform.structured.fuse_into_containing_op %0 into %1
+        : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      transform.test_print_remark_at_operand %containing, "new containing op" : !transform.any_op
+      transform.yield
+    }
   }
 }
 
@@ -412,14 +426,16 @@ module {
     // CHECK: }
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
 
-    // linalg.generic is tileable. The op is tiled and fused.
-    transform.structured.fuse_into_containing_op %0 into %1
-      : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      // linalg.generic is tileable. The op is tiled and fused.
+      transform.structured.fuse_into_containing_op %0 into %1
+        : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -477,14 +493,16 @@ module {
     // CHECK: }
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
 
-    // linalg.generic is tileable. The op is tiled and fused.
-    transform.structured.fuse_into_containing_op %0 into %1
-      : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      // linalg.generic is tileable. The op is tiled and fused.
+      transform.structured.fuse_into_containing_op %0 into %1
+        : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -550,16 +568,18 @@ module {
     // CHECK: }
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
-    %add, %reduce = transform.split_handle %0 : (!transform.op<"linalg.generic">) -> (!transform.op<"linalg.generic">, !transform.op<"linalg.generic">)
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
-
-    %fused_ops, %new_forall = transform.structured.fuse_into_containing_op %reduce into %1
-      : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.op<"scf.forall">)
-    %fused_ops_2, %new_forall_2 = transform.structured.fuse_into_containing_op %add into %new_forall
-      : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.op<"scf.forall">)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
+      %add, %reduce = transform.split_handle %0 : (!transform.op<"linalg.generic">) -> (!transform.op<"linalg.generic">, !transform.op<"linalg.generic">)
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
+
+      %fused_ops, %new_forall = transform.structured.fuse_into_containing_op %reduce into %1
+        : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.op<"scf.forall">)
+      %fused_ops_2, %new_forall_2 = transform.structured.fuse_into_containing_op %add into %new_forall
+        : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.op<"scf.forall">)
+      transform.yield
+    }
   }
 }
 
@@ -620,12 +640,14 @@ module {
     return %7 : tensor<16x128x128xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    %0 = transform.structured.match attributes{producer} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
-    %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
-    transform.structured.fuse_into_containing_op %0 into %1
-      : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match attributes{producer} in %arg1 : (!transform.any_op) -> !transform.op<"linalg.generic">
+      %1 = transform.structured.match ops{["scf.forall"]} in %arg1 : (!transform.any_op) -> !transform.op<"scf.forall">
+      transform.structured.fuse_into_containing_op %0 into %1
+        : (!transform.op<"linalg.generic">, !transform.op<"scf.forall">) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -645,19 +667,21 @@ func.func @copy_1d_1024xf16(%arg0: tensor<123x456xf32>, %arg1: tensor<456x789xf3
   return %2 : tensor<123x789xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  %tiled_op, %forall_op = transform.structured.tile_using_forall %1
-    num_threads [] tile_sizes [50, 16]
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  // Note that we pass in %tiled_op, which isn't a container op.
-  // expected-error @+2 {{could not find next producer to fuse into container}}
-  %fused_op, %new_containing_op =
-    transform.structured.fuse_into_containing_op %0 into %tiled_op
-      : (!transform.any_op, !transform.any_op)
-        -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    %tiled_op, %forall_op = transform.structured.tile_using_forall %1
+      num_threads [] tile_sizes [50, 16]
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    // Note that we pass in %tiled_op, which isn't a container op.
+    // expected-error @+2 {{could not find next producer to fuse into container}}
+    %fused_op, %new_containing_op =
+      transform.structured.fuse_into_containing_op %0 into %tiled_op
+        : (!transform.any_op, !transform.any_op)
+          -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-fuse.mlir b/mlir/test/Dialect/Linalg/transform-op-fuse.mlir
index 72c61661b55efd3..69daf8c80a16d31 100644
--- a/mlir/test/Dialect/Linalg/transform-op-fuse.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-fuse.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file -canonicalize | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file -canonicalize | FileCheck %s
 
 // CHECK-LABEL: func.func @fuse_unary
 func.func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
@@ -15,11 +15,13 @@ func.func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<
   return %1 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [32, 32], tile_interchange = [0, 1]}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [32, 32], tile_interchange = [0, 1]}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -43,12 +45,14 @@ func.func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<
   return %1 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [32, 32], tile_interchange = [0, 1]}
-    : (!transform.any_op) -> (!transform.any_op, !transform.op<"scf.for">, !transform.any_op)
-  transform.loop.peel %loops#0 : (!transform.op<"scf.for">) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [32, 32], tile_interchange = [0, 1]}
+      : (!transform.any_op) -> (!transform.any_op, !transform.op<"scf.for">, !transform.any_op)
+    transform.loop.peel %loops#0 : (!transform.op<"scf.for">) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -86,13 +90,15 @@ func.func @interchange_reduction(%input: tensor<12x7x25xf32>) -> tensor<12x25xf3
   func.return %0 : tensor<12x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [5, 0, 7], tile_interchange = [0, 2, 1]}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
-  %2, %loops_2 = transform.structured.tile_using_for %1 [0, 4]
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [5, 0, 7], tile_interchange = [0, 2, 1]}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %2, %loops_2 = transform.structured.tile_using_for %1 [0, 4]
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -112,11 +118,13 @@ func.func @unpack_elemwise(%arg0: tensor<16x48x8x8xf32>, %arg1: tensor<128x384xf
   return %2 : tensor<128x384xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.elemwise_unary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [16, 32], tile_interchange = [0, 1]}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.elemwise_unary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [16, 32], tile_interchange = [0, 1]}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -136,11 +144,13 @@ func.func @pack_elemwise(%arg0: tensor<128x384xf32>, %arg1: tensor<16x48x8x8xf32
   return %2 : tensor<16x48x8x8xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.elemwise_unary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [3, 5, 0, 0]}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.elemwise_unary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.fuse %0 {tile_sizes = [3, 5, 0, 0]}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -160,9 +170,11 @@ func.func @nofuse_pack_elemwise(%arg0: tensor<128x384xf32>, %arg1: tensor<16x48x
   return %2 : tensor<16x48x8x8xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.elemwise_unary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:3 = transform.structured.fuse %0 {tile_sizes = [3, 5, 2, 0]}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.elemwise_unary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.fuse %0 {tile_sizes = [3, 5, 2, 0]}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-generalize.mlir b/mlir/test/Dialect/Linalg/transform-op-generalize.mlir
index 470f98eb3a943e1..a0aa33c072dd4e6 100644
--- a/mlir/test/Dialect/Linalg/transform-op-generalize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-generalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter %s | FileCheck %s
+// RUN: mlir-opt --transform-interpreter %s | FileCheck %s
 
 // CHECK-LABEL: func.func @generalize_unary
 func.func @generalize_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
@@ -25,8 +25,10 @@ func.func @map_no_inputs(%input: tensor<16x32x64xf32>,
       }
   func.return %reduce : tensor<16x64xf32>
 }
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.generalize %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.generalize %0 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-gpu-map-copy-to-threads.mlir b/mlir/test/Dialect/Linalg/transform-op-gpu-map-copy-to-threads.mlir
index c9657493c245dbf..ac8e2c9f575705d 100644
--- a/mlir/test/Dialect/Linalg/transform-op-gpu-map-copy-to-threads.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-gpu-map-copy-to-threads.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -test-transform-dialect-interpreter -split-input-file -verify-diagnostics -allow-unregistered-dialect %s | FileCheck %s
+// RUN: mlir-opt -transform-interpreter -split-input-file -verify-diagnostics -allow-unregistered-dialect %s | FileCheck %s
 
 
 !tt = tensor<8xf16>
@@ -10,17 +10,19 @@ func.func @copy_1d_8xf16(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (1) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<8xf16>
   // CHECK: {mapping = [#gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -44,13 +46,15 @@ func.func @pad_1d_8xf16(%t0: !tin, %sz: index) -> !tt {
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"tensor.pad">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"tensor.pad">)
+    transform.yield
+  }
 }
 
 // -----
@@ -64,17 +68,19 @@ func.func @copy_1d_16xf16(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (2) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<8xf16>
   // CHECK: {mapping = [#gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -88,17 +94,19 @@ func.func @copy_1d_20xf16(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (5) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<4xf16>
   // CHECK: {mapping = [#gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 
@@ -113,17 +121,19 @@ func.func @copy_1d_20xf16(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (5) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<4xf16>
   // CHECK: {mapping = [#gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -137,17 +147,19 @@ func.func @copy_1d_128xf16(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (32) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<4xf16>
   // CHECK: {mapping = [#gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -160,17 +172,19 @@ func.func @copy_1d_256xf16(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (32) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<8xf16>
   // CHECK: {mapping = [#gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -182,17 +196,19 @@ func.func @copy_3d_16x32x64xi8(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (1, 8, 4) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<16x4x16xi8>
   // CHECK: {mapping = [#gpu.thread<linear_dim_2>, #gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -204,17 +220,19 @@ func.func @copy_3d_16x32x64xi8(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (1, 4, 8) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<16x8x8xi8>
   // CHECK: {mapping = [#gpu.thread<linear_dim_2>, #gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 64
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 64
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -226,17 +244,19 @@ func.func @copy_3d_4x8x16xi8(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (4, 8, 1) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<1x1x16xi8>
   // CHECK: {mapping = [#gpu.thread<linear_dim_2>, #gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -248,17 +268,19 @@ func.func @copy_3d_4x8x16xi8(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (1, 2, 16) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<4x4x1xi8>
   // CHECK: {mapping = [#gpu.thread<linear_dim_2>, #gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 8
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 8
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -274,17 +296,19 @@ func.func @copy_3d_3x5x7xi8(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (3, 1, 7) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<1x5x1xi8>
   // CHECK: {mapping = [#gpu.thread<linear_dim_2>, #gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 8
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 8
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -297,17 +321,19 @@ func.func @copy_3d_16x15x5xi8(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (8, 3, 5) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<2x5x1xi8>
   // CHECK: {mapping = [#gpu.thread<linear_dim_2>, #gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 128 desired_bit_alignment = 8
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 128 desired_bit_alignment = 8
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -320,17 +346,19 @@ func.func @copy_3d_16x15x40xi8(%t0: !tt, %out: !tt) -> !tt {
   // CHECK: scf.forall {{.*}} in (8, 3, 5) {{.*}}
   // CHECK:   linalg.copy {{.*}} -> tensor<2x5x8xi8>
   // CHECK: {mapping = [#gpu.thread<linear_dim_2>, #gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 128 desired_bit_alignment = 64
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 128 desired_bit_alignment = 64
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 
@@ -349,18 +377,20 @@ func.func @copy_1d_1024xf16(%t0: !tt, %out: !tt) -> !tt {
   /// of threads.
 
   // expected-note @below {{target op}}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{too few threads to map copy op to threads on the most minor dimension, given alignment and vector size constraints}}
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{too few threads to map copy op to threads on the most minor dimension, given alignment and vector size constraints}}
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -372,20 +402,22 @@ func.func @copy_1d_257xf16(%t0: !tt, %out: !tt) -> !tt {
   /// Too much data for all threads, we do not try to recover here, this is the
   /// job of higher-level transformations to select better tile sizes and number
   /// of threads.
-  
+
   // expected-note @below {{target op}}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{too few threads to map copy op to threads on the most minor dimension, given alignment and vector size constraints}}
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 128
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{too few threads to map copy op to threads on the most minor dimension, given alignment and vector size constraints}}
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 128
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -394,22 +426,24 @@ transform.sequence failures(propagate) {
 
 // NO-CHECK-LABEL-ON-EXPECTED-ERROR
 func.func @copy_1d_512xi8(%t0: !tt, %out: !tt) -> !tt {
-  /// Too much data for all threads given the forced alignment to 8b, 
-  /// we do not try to recover here, this is the job of higher-level 
+  /// Too much data for all threads given the forced alignment to 8b,
+  /// we do not try to recover here, this is the job of higher-level
   /// transformations to select better tile sizes and number of threads.
   // expected-note @below {{target op}}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{too few threads to map copy op to threads on the most minor dimension, given alignment and vector size constraints}}
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 8
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{too few threads to map copy op to threads on the most minor dimension, given alignment and vector size constraints}}
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 8
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
 
 // -----
@@ -418,20 +452,22 @@ transform.sequence failures(propagate) {
 
 // NO-CHECK-LABEL-ON-EXPECTED-ERROR
 func.func @copy_3d_16x32x64xi8(%t0: !tt, %out: !tt) -> !tt {
-  /// Too much data for all threads given the forced alignment to 8b, 
-  /// we do not try to recover here, this is the job of higher-level 
+  /// Too much data for all threads given the forced alignment to 8b,
+  /// we do not try to recover here, this is the job of higher-level
   /// transformations to select better tile sizes and number of threads.
   // expected-note @below {{target op}}
-  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt 
+  %0 = linalg.copy ins(%t0: !tt) outs(%out: !tt) -> !tt
   return %0 : !tt
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{too few threads to map copy op to threads on the most minor dimension, given alignment and vector size constraints}}
-  transform.structured.gpu.map_copy_to_threads %0 
-    total_num_threads = 32 desired_bit_alignment = 8
-      : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{too few threads to map copy op to threads on the most minor dimension, given alignment and vector size constraints}}
+    transform.structured.gpu.map_copy_to_threads %0
+      total_num_threads = 32 desired_bit_alignment = 8
+        : (!transform.any_op) -> (!transform.op<"scf.forall">, !transform.op<"linalg.copy">)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-hoist-pad-build-packing-loop-nest.mlir b/mlir/test/Dialect/Linalg/transform-op-hoist-pad-build-packing-loop-nest.mlir
index 02117456d1e53f4..6bec1cbd65be68b 100644
--- a/mlir/test/Dialect/Linalg/transform-op-hoist-pad-build-packing-loop-nest.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-hoist-pad-build-packing-loop-nest.mlir
@@ -1,6 +1,6 @@
 // BUILD-PACKING-LOOP-NEST only checks the creation of packing code but does not connect it.
 // Do not run canonicalization as it would be DCE'd away.
-// RUN: mlir-opt --test-transform-dialect-interpreter -split-input-file --verify-diagnostics %s | FileCheck %s --check-prefix=BUILD-PACKING-LOOP-NEST
+// RUN: mlir-opt --transform-interpreter -split-input-file --verify-diagnostics %s | FileCheck %s --check-prefix=BUILD-PACKING-LOOP-NEST
 
 func.func @pad_and_hoist_rhs(
   %arg0: tensor<24x12xf32>, %arg1: tensor<12x25xf32>, %arg2: tensor<24x25xf32>)
@@ -10,26 +10,28 @@ func.func @pad_and_hoist_rhs(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
-  %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  // In this case, the pad op is actually empty: we only tile the first dimension
-  // and it does not have an impact on the RHS operand.
-  %pad = transform.get_producer_of_operand %matmul_padded[1]
-    : (!transform.any_op) -> !transform.any_op
+    // In this case, the pad op is actually empty: we only tile the first dimension
+    // and it does not have an impact on the RHS operand.
+    %pad = transform.get_producer_of_operand %matmul_padded[1]
+      : (!transform.any_op) -> !transform.any_op
 
-  // expected-error @below {{requires exactly 2 non-null handles}}
-  transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1
-     : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    // expected-error @below {{requires exactly 2 non-null handles}}
+    transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1
+       : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -42,25 +44,27 @@ func.func @pad_and_hoist_init(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
-  %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %pad = transform.get_producer_of_operand %matmul_padded[2]
-    : (!transform.any_op) -> !transform.any_op
+    %pad = transform.get_producer_of_operand %matmul_padded[2]
+      : (!transform.any_op) -> !transform.any_op
 
-  // We do not know yet how to hoist the init.
-  // expected-error @below {{could not build packing loop nest}}
-  transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1
-     : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    // We do not know yet how to hoist the init.
+    // expected-error @below {{could not build packing loop nest}}
+    transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1
+       : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -80,23 +84,25 @@ func.func @pad_and_hoist_lhs(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
-  %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %pad = transform.get_producer_of_operand %matmul_padded[0]
-    : (!transform.any_op) -> !transform.any_op
+    %pad = transform.get_producer_of_operand %matmul_padded[0]
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1
-     : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1
+       : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -118,23 +124,25 @@ func.func @pad_and_hoist_lhs_transpose(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
-  %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %pad = transform.get_producer_of_operand %matmul_padded[0]
-    : (!transform.any_op) -> !transform.any_op
+    %pad = transform.get_producer_of_operand %matmul_padded[0]
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1, transpose by [1, 0]
-     : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1, transpose by [1, 0]
+       : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -154,21 +162,23 @@ func.func @pad_and_hoist_init(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
-  %matmul_l1, %loops_l1:2 = transform.structured.tile_using_for %matmul [5, 0, 7] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1:2 = transform.structured.tile_using_for %matmul [5, 0, 7] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %pad = transform.get_producer_of_operand %matmul_padded[2]
-    : (!transform.any_op) -> !transform.any_op
+    %pad = transform.get_producer_of_operand %matmul_padded[2]
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1#1
-     : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.structured.hoist_pad.build_packing_loop_nest %pad above %loops_l1#1
+       : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-hoist-pad.mlir b/mlir/test/Dialect/Linalg/transform-op-hoist-pad.mlir
index e8777303cbe360f..b66db855035bcd7 100644
--- a/mlir/test/Dialect/Linalg/transform-op-hoist-pad.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-hoist-pad.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter -canonicalize -split-input-file --verify-diagnostics %s | FileCheck %s
+// RUN: mlir-opt --transform-interpreter -canonicalize -split-input-file --verify-diagnostics %s | FileCheck %s
 
 func.func @pad_and_hoist_rhs(
   %arg0: tensor<24x12xf32>, %arg1: tensor<12x25xf32>, %arg2: tensor<24x25xf32>)
@@ -9,29 +9,31 @@ func.func @pad_and_hoist_rhs(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
 
-  %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    copy_back_op = "none"
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      copy_back_op = "none"
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  // In this case, the pad op is actually empty: we only tile the first dimension
-  // and it does not have an impact on the RHS operand.
-  // expected-error @below {{incompatible payload operation name}}
-  %pad = transform.get_producer_of_operand %matmul_padded[1]
-    : (!transform.any_op) -> !transform.op<"tensor.pad">
+    // In this case, the pad op is actually empty: we only tile the first dimension
+    // and it does not have an impact on the RHS operand.
+    // expected-error @below {{incompatible payload operation name}}
+    %pad = transform.get_producer_of_operand %matmul_padded[1]
+      : (!transform.any_op) -> !transform.op<"tensor.pad">
 
-  // We do not even reach this transform op.
-  transform.structured.hoist_pad %pad by 1 loops
-     : (!transform.op<"tensor.pad">) -> !transform.any_op
+    // We do not even reach this transform op.
+    transform.structured.hoist_pad %pad by 1 loops
+       : (!transform.op<"tensor.pad">) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -45,27 +47,29 @@ func.func @pad_and_hoist_init(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
 
-  %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    copy_back_op = "none"
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      copy_back_op = "none"
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %pad = transform.get_producer_of_operand %matmul_padded[2]
-    : (!transform.any_op) -> !transform.op<"tensor.pad">
+    %pad = transform.get_producer_of_operand %matmul_padded[2]
+      : (!transform.any_op) -> !transform.op<"tensor.pad">
 
-  // We do not know yet how to hoist the init.
-  // expected-error @below {{transform.structured.hoist_pad failed to apply}}
-  transform.structured.hoist_pad %pad by 1 loops
-     : (!transform.op<"tensor.pad">) -> !transform.any_op
+    // We do not know yet how to hoist the init.
+    // expected-error @below {{transform.structured.hoist_pad failed to apply}}
+    transform.structured.hoist_pad %pad by 1 loops
+       : (!transform.op<"tensor.pad">) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -88,25 +92,27 @@ func.func @pad_and_hoist_lhs(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
 
-  %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    copy_back_op = "none"
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      copy_back_op = "none"
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %pad = transform.get_producer_of_operand %matmul_padded[0]
-    : (!transform.any_op) -> !transform.any_op
+    %pad = transform.get_producer_of_operand %matmul_padded[0]
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.structured.hoist_pad %pad by 1 loops
-     : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_pad %pad by 1 loops
+       : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -133,25 +139,27 @@ func.func @pad_and_hoist_lhs_transpose(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
 
-  %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    copy_back_op = "none"
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      copy_back_op = "none"
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %pad = transform.get_producer_of_operand %matmul_padded[0]
-    : (!transform.any_op) -> !transform.any_op
+    %pad = transform.get_producer_of_operand %matmul_padded[0]
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.structured.hoist_pad %pad by 1 loops, transpose by [1, 0]
-     : (!transform.any_op) -> !transform.any_op
+    transform.structured.hoist_pad %pad by 1 loops, transpose by [1, 0]
+       : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -177,23 +185,25 @@ func.func @pad_and_hoist_init(
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
 
 
-  %matmul_l1, %loops_l1:2 = transform.structured.tile_using_for %matmul [5, 0, 7] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_l1, %loops_l1:2 = transform.structured.tile_using_for %matmul [5, 0, 7] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
-    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    copy_back_op = "none"
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
+      padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      copy_back_op = "none"
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %pad = transform.get_producer_of_operand %matmul_padded[2]
-    : (!transform.any_op) -> !transform.op<"tensor.pad">
+    %pad = transform.get_producer_of_operand %matmul_padded[2]
+      : (!transform.any_op) -> !transform.op<"tensor.pad">
 
-  transform.structured.hoist_pad %pad by 1 loops
-     : (!transform.op<"tensor.pad">) -> !transform.any_op
+    transform.structured.hoist_pad %pad by 1 loops
+       : (!transform.op<"tensor.pad">) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-insert-slice-to-copy.mlir b/mlir/test/Dialect/Linalg/transform-op-insert-slice-to-copy.mlir
index e6b2d2b0c4c3ebe..8894f06334abc11 100644
--- a/mlir/test/Dialect/Linalg/transform-op-insert-slice-to-copy.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-insert-slice-to-copy.mlir
@@ -1,8 +1,8 @@
-// RUN: mlir-opt -test-transform-dialect-interpreter %s --split-input-file --allow-unregistered-dialect | FileCheck %s
+// RUN: mlir-opt -transform-interpreter %s --split-input-file --allow-unregistered-dialect | FileCheck %s
 
 // CHECK-LABEL: func @insert_slice_to_copy
     // CHECK-SAME: %[[I:.*]]: tensor<2x3xf32>
-    // CHECK-SAME: %[[O:.*]]: tensor<?x?xf32>, 
+    // CHECK-SAME: %[[O:.*]]: tensor<?x?xf32>,
     // CHECK-SAME: %[[OFF0:[0-9a-zA-Z]+]]: index,
     // CHECK-SAME: %[[OFF1:[0-9a-zA-Z]+]]: index,
     // CHECK-SAME: %[[SZ0:[0-9a-zA-Z]+]]: index,
@@ -10,34 +10,36 @@
     // CHECK-SAME: %[[ST0:[0-9a-zA-Z]+]]: index,
     // CHECK-SAME: %[[ST1:[0-9a-zA-Z]+]]: index)
 func.func @insert_slice_to_copy(
-    %I : tensor<2x3xf32>, %O : tensor<?x?xf32>, 
+    %I : tensor<2x3xf32>, %O : tensor<?x?xf32>,
     %off0 : index, %off1 : index,
     %sz0 : index, %sz1 : index,
     %st0 : index, %st1 : index) -> tensor<?x?xf32> {
 
-  //      CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[O]][%[[OFF0]], %[[OFF1]]] [2, 3] [%[[ST0]], %[[ST1]]] 
+  //      CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[O]][%[[OFF0]], %[[OFF1]]] [2, 3] [%[[ST0]], %[[ST1]]]
   // CHECK-SAME:   : tensor<?x?xf32> to tensor<2x3xf32>
   //      CHECK: linalg.copy ins(%[[I]] : tensor<2x3xf32>) outs(%[[EXTRACTED_SLICE]] : tensor<2x3xf32>) -> tensor<2x3xf32>
   //      CHECK: tensor.insert_slice %{{.*}} into %[[O]][%[[OFF0]], %[[OFF1]]] [2, 3] [%[[ST0]], %[[ST1]]]
   // CHECK-SAME:   : tensor<2x3xf32> into tensor<?x?xf32>
 
-  %0 = tensor.insert_slice %I into %O[%off0, %off1] [2, 3] [%st0, %st1] 
+  %0 = tensor.insert_slice %I into %O[%off0, %off1] [2, 3] [%st0, %st1]
     : tensor<2x3xf32> into tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.insert_slice_to_copy %0 : (!transform.any_op) -> !transform.any_op
-  transform.cast %1 : !transform.any_op to !transform.op<"linalg.copy">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.insert_slice_to_copy %0 : (!transform.any_op) -> !transform.any_op
+    transform.cast %1 : !transform.any_op to !transform.op<"linalg.copy">
+    transform.yield
+  }
 }
 
 // -----
 
 // CHECK-LABEL: func @insert_slice_to_copy
     // CHECK-SAME: %[[I:[0-9a-zA-Z]+]]: tensor<?x?xf32>
-    // CHECK-SAME: %[[O:[0-9a-zA-Z]+]]: tensor<?x?xf32>, 
+    // CHECK-SAME: %[[O:[0-9a-zA-Z]+]]: tensor<?x?xf32>,
     // CHECK-SAME: %[[OFF0:[0-9a-zA-Z]+]]: index,
     // CHECK-SAME: %[[OFF1:[0-9a-zA-Z]+]]: index,
     // CHECK-SAME: %[[SZ0:[0-9a-zA-Z]+]]: index,
@@ -45,33 +47,35 @@ transform.sequence failures(propagate) {
     // CHECK-SAME: %[[ST0:[0-9a-zA-Z]+]]: index,
     // CHECK-SAME: %[[ST1:[0-9a-zA-Z]+]]: index)
 func.func @insert_slice_to_copy(
-    %I : tensor<?x?xf32>, %O : tensor<?x?xf32>, 
+    %I : tensor<?x?xf32>, %O : tensor<?x?xf32>,
     %off0 : index, %off1 : index,
     %sz0 : index, %sz1 : index,
     %st0 : index, %st1 : index) -> tensor<?x?xf32> {
 
-  //      CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[O]][%[[OFF0]], %[[OFF1]]] [%[[SZ0]], %[[SZ1]]] [1, 1] 
+  //      CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[O]][%[[OFF0]], %[[OFF1]]] [%[[SZ0]], %[[SZ1]]] [1, 1]
   // CHECK-SAME:   : tensor<?x?xf32> to tensor<?x?xf32>
   //      CHECK: linalg.copy ins(%[[I]] : tensor<?x?xf32>) outs(%[[EXTRACTED_SLICE]] : tensor<?x?xf32>) -> tensor<?x?xf32>
   //      CHECK: tensor.insert_slice %{{.*}} into %[[O]][%[[OFF0]], %[[OFF1]]] [%[[SZ0]], %[[SZ1]]] [1, 1]
   // CHECK-SAME:   : tensor<?x?xf32> into tensor<?x?xf32>
 
-  %0 = tensor.insert_slice %I into %O[%off0, %off1] [%sz0, %sz1] [1, 1] 
+  %0 = tensor.insert_slice %I into %O[%off0, %off1] [%sz0, %sz1] [1, 1]
     : tensor<?x?xf32> into tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.insert_slice_to_copy %0 : (!transform.any_op) -> !transform.any_op
-  transform.cast %1 : !transform.any_op to !transform.op<"linalg.copy">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.insert_slice_to_copy %0 : (!transform.any_op) -> !transform.any_op
+    transform.cast %1 : !transform.any_op to !transform.op<"linalg.copy">
+    transform.yield
+  }
 }
 
 // -----
 // CHECK-LABEL: func @insert_slice_to_copy
     // CHECK-SAME: %[[I:.*]]: tensor<2x3xf32>
-    // CHECK-SAME: %[[O:.*]]: tensor<?x?xf32>, 
+    // CHECK-SAME: %[[O:.*]]: tensor<?x?xf32>,
     // CHECK-SAME: %[[OFF0:[0-9a-zA-Z]+]]: index,
     // CHECK-SAME: %[[OFF1:[0-9a-zA-Z]+]]: index,
     // CHECK-SAME: %[[SZ0:[0-9a-zA-Z]+]]: index,
@@ -79,33 +83,35 @@ transform.sequence failures(propagate) {
     // CHECK-SAME: %[[ST0:[0-9a-zA-Z]+]]: index,
     // CHECK-SAME: %[[ST1:[0-9a-zA-Z]+]]: index)
 func.func @insert_slice_to_copy(
-    %I : tensor<2x3xf32>, %O : tensor<?x?xf32>, 
+    %I : tensor<2x3xf32>, %O : tensor<?x?xf32>,
     %off0 : index, %off1 : index,
     %sz0 : index, %sz1 : index,
     %st0 : index, %st1 : index) -> tensor<?x?xf32> {
 
-  //      CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[O]][%[[OFF0]], %[[OFF1]]] [2, 3] [%[[ST0]], %[[ST1]]] 
+  //      CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[O]][%[[OFF0]], %[[OFF1]]] [2, 3] [%[[ST0]], %[[ST1]]]
   // CHECK-SAME:   : tensor<?x?xf32> to tensor<2x3xf32>
   //      CHECK: linalg.copy ins(%[[I]] : tensor<2x3xf32>) outs(%[[EXTRACTED_SLICE]] : tensor<2x3xf32>) -> tensor<2x3xf32>
   //  CHECK-NOT: linalg.copy
   //      CHECK: tensor.insert_slice %{{.*}} into %[[O]][%[[OFF0]], %[[OFF1]]] [2, 3] [%[[ST0]], %[[ST1]]]
   // CHECK-SAME:   : tensor<2x3xf32> into tensor<?x?xf32>
 
-  %extracted_slice = tensor.extract_slice %O[%off0, %off1] [2, 3] [%st0, %st1] 
+  %extracted_slice = tensor.extract_slice %O[%off0, %off1] [2, 3] [%st0, %st1]
     : tensor<?x?xf32> to tensor<2x3xf32>
-  %0 = linalg.copy ins(%I : tensor<2x3xf32>) outs(%extracted_slice 
+  %0 = linalg.copy ins(%I : tensor<2x3xf32>) outs(%extracted_slice
     : tensor<2x3xf32>) -> tensor<2x3xf32>
-  %inserted_slice = tensor.insert_slice %0 into %O[%off0, %off1] [2, 3] [%st0, %st1] 
+  %inserted_slice = tensor.insert_slice %0 into %O[%off0, %off1] [2, 3] [%st0, %st1]
     : tensor<2x3xf32> into tensor<?x?xf32>
 
   return %inserted_slice : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.insert_slice_to_copy %0 : (!transform.any_op) -> !transform.any_op
-  transform.cast %1 : !transform.any_op to !transform.op<"linalg.copy">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.insert_slice_to_copy %0 : (!transform.any_op) -> !transform.any_op
+    transform.cast %1 : !transform.any_op to !transform.op<"linalg.copy">
+    transform.yield
+  }
 }
 
 // -----
@@ -120,18 +126,20 @@ func.func @parallel_insert_slice_to_copy(%out : tensor<?x?xf32>, %sz0: index, %s
     //      CHECK: scf.forall.in_parallel
     //      CHECK:   tensor.parallel_insert_slice
     scf.forall.in_parallel {
-      tensor.parallel_insert_slice %t into %arg2[0, 0] [%sz0, %sz1] [1, 1] 
+      tensor.parallel_insert_slice %t into %arg2[0, 0] [%sz0, %sz1] [1, 1]
         : tensor<?x?xf32> into tensor<?x?xf32>
     }
   }
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.parallel_insert_slice"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.insert_slice_to_copy %0
-    : (!transform.any_op) -> !transform.any_op
-  transform.cast %1 : !transform.any_op to !transform.op<"linalg.copy">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.parallel_insert_slice"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.insert_slice_to_copy %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.cast %1 : !transform.any_op to !transform.op<"linalg.copy">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-interchange.mlir b/mlir/test/Dialect/Linalg/transform-op-interchange.mlir
index 3efb76afdfea76e..1b49abe98999888 100644
--- a/mlir/test/Dialect/Linalg/transform-op-interchange.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-interchange.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics | FileCheck %s
 
 //       CHECK: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d1, d0)>
 
@@ -18,10 +18,12 @@ func.func @interchange_generic(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.interchange %0 iterator_interchange = [1, 0] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.interchange %0 iterator_interchange = [1, 0] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -32,11 +34,13 @@ func.func @interchange_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %a
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{transform applied to the wrong op kind}}
-  transform.structured.interchange %0 iterator_interchange = [1, 0] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{transform applied to the wrong op kind}}
+    transform.structured.interchange %0 iterator_interchange = [1, 0] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -53,10 +57,11 @@ func.func @too_many_iters(%0: tensor<?x?xf32>, %1: tensor<?x?xf32>) -> tensor<?x
   return %r : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{"iterator_interchange" has length (3) different from the number of loops in the target operation (2)}}
-  transform.structured.interchange %0 iterator_interchange = [2,1,0] : (!transform.any_op) -> !transform.any_op
-  transform.yield
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{"iterator_interchange" has length (3) different from the number of loops in the target operation (2)}}
+    transform.structured.interchange %0 iterator_interchange = [2,1,0] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-match.mlir b/mlir/test/Dialect/Linalg/transform-op-match.mlir
index 9db63dc0696dab3..7d48b1f403b3bcb 100644
--- a/mlir/test/Dialect/Linalg/transform-op-match.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-match.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics
+// RUN: mlir-opt %s --transform-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics
 
 func.func @bar() {
   // expected-remark @below {{matched op name}}
@@ -9,15 +9,17 @@ func.func @bar() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %match_name = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.test_print_remark_at_operand %match_name, "matched op name" : !transform.any_op
-  transform.test_consume_operand %match_name : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %match_name = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.test_print_remark_at_operand %match_name, "matched op name" : !transform.any_op
+    transform.test_consume_operand %match_name : !transform.any_op
 
-  %match_attr = transform.structured.match ops{["arith.constant"]} attributes{my_attr} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.test_print_remark_at_operand %match_attr, "matched attr name" : !transform.any_op
-  transform.test_consume_operand %match_attr : !transform.any_op
+    %match_attr = transform.structured.match ops{["arith.constant"]} attributes{my_attr} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.test_print_remark_at_operand %match_attr, "matched attr name" : !transform.any_op
+    transform.test_consume_operand %match_attr : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -29,12 +31,14 @@ func.func @by_type() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %match_name = transform.structured.match
-    ops{["arith.constant"]} filter_result_type = f32 in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.test_print_remark_at_operand %match_name, "matched op name" : !transform.any_op
-  transform.test_consume_operand %match_name : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %match_name = transform.structured.match
+      ops{["arith.constant"]} filter_result_type = f32 in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.test_print_remark_at_operand %match_name, "matched op name" : !transform.any_op
+    transform.test_consume_operand %match_name : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -48,11 +52,12 @@ func.func @foo(%a: tensor<4x4xf32>, %b: tensor<4x4xf32>, %c: tensor<4x4xf32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %matched = transform.structured.match interface{TilingInterface} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.test_print_remark_at_operand %matched, "tileable" : !transform.any_op
-  transform.yield
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %matched = transform.structured.match interface{TilingInterface} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.test_print_remark_at_operand %matched, "tileable" : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -73,26 +78,28 @@ func.func @match_complex_attribute(%arg0: tensor<12x128x32xf32>)
   return %1 : tensor<128x12x32xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %match_attr = transform.structured.match
-      ops{["linalg.generic"]}
-      attributes{iterator_types = [
-        #linalg.iterator_type<parallel>,
-        #linalg.iterator_type<parallel>,
-        #linalg.iterator_type<parallel>]}
-      in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.test_print_remark_at_operand %match_attr, "matched complex attr" : !transform.any_op
-  transform.test_consume_operand %match_attr : !transform.any_op
-
-  %no_match = transform.structured.match
-      attributes{iterator_types = [
-        #linalg.iterator_type<parallel>,
-        #linalg.iterator_type<parallel>,
-        #linalg.iterator_type<reduction>]}
-      in %arg1 : (!transform.any_op) -> !transform.any_op
-// expected-remark @below {{0}}
-  transform.test_print_number_of_associated_payload_ir_ops %no_match : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %match_attr = transform.structured.match
+        ops{["linalg.generic"]}
+        attributes{iterator_types = [
+          #linalg.iterator_type<parallel>,
+          #linalg.iterator_type<parallel>,
+          #linalg.iterator_type<parallel>]}
+        in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.test_print_remark_at_operand %match_attr, "matched complex attr" : !transform.any_op
+    transform.test_consume_operand %match_attr : !transform.any_op
+
+    %no_match = transform.structured.match
+        attributes{iterator_types = [
+          #linalg.iterator_type<parallel>,
+          #linalg.iterator_type<parallel>,
+          #linalg.iterator_type<reduction>]}
+        in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-remark @below {{0}}
+    transform.test_print_number_of_associated_payload_ir_ops %no_match : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -117,9 +124,10 @@ func.func @foo(%lb: index, %ub: index, %step: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %matched = transform.structured.match interface{LoopLikeInterface} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.test_print_remark_at_operand %matched, "loop-like" : !transform.any_op
-  transform.yield
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %matched = transform.structured.match interface{LoopLikeInterface} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.test_print_remark_at_operand %matched, "loop-like" : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-matmul-to-outerproduct.mlir b/mlir/test/Dialect/Linalg/transform-op-matmul-to-outerproduct.mlir
index 0cba79d2be8a641..ee66073a9a41935 100644
--- a/mlir/test/Dialect/Linalg/transform-op-matmul-to-outerproduct.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-matmul-to-outerproduct.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter | FileCheck %s
 
 func.func @outerproduct_matmul(%A: memref<3x3xf32>, %B: memref<3x3xf32>, %C: memref<3x3xf32>) {
   linalg.matmul ins(%A, %B: memref<3x3xf32>, memref<3x3xf32>)
@@ -27,12 +27,14 @@ func.func @outerproduct_matmul(%A: memref<3x3xf32>, %B: memref<3x3xf32>, %C: mem
 // CHECK:           return
 // CHECK:         }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %2 {
-    transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %2 {
+      transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir b/mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir
index 49924ae93f0aa0b..6efc957f0bc4e9a 100644
--- a/mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir
@@ -1,11 +1,13 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file --verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file --verify-diagnostics | FileCheck %s
 
 // CHECK-DAG: #[[$MAP13:.+]] = affine_map<() -> (13)>
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    transform.structured.multitile_sizes %0 { target_size = 3, dimension = 0 } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      transform.structured.multitile_sizes %0 { target_size = 3, dimension = 0 } : (!transform.any_op) -> !transform.any_op
+      transform.yield
+  }
 }
 
 // CHECK-LABEL: @multitile_sizes_static
@@ -26,18 +28,20 @@ func.func @multitile_sizes_static(
 
 // -----
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %low_tile, %high_tile, %split_point =
-      transform.structured.multitile_sizes %0 { target_size = 3, dimension = 0 }
-      : (!transform.any_op) -> !transform.param<i64>
-    // expected-remark @below {{2 : i64}}
-    transform.test_print_param %low_tile : !transform.param<i64>
-    // expected-remark @below {{3 : i64}}
-    transform.test_print_param %high_tile : !transform.param<i64>
-    // expected-remark @below {{4 : i64}}
-    transform.test_print_param %split_point : !transform.param<i64>
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %low_tile, %high_tile, %split_point =
+        transform.structured.multitile_sizes %0 { target_size = 3, dimension = 0 }
+        : (!transform.any_op) -> !transform.param<i64>
+      // expected-remark @below {{2 : i64}}
+      transform.test_print_param %low_tile : !transform.param<i64>
+      // expected-remark @below {{3 : i64}}
+      transform.test_print_param %high_tile : !transform.param<i64>
+      // expected-remark @below {{4 : i64}}
+      transform.test_print_param %split_point : !transform.param<i64>
+      transform.yield
+  }
 }
 
 // CHECK-LABEL: @multitile_sizes_static_gen
@@ -53,10 +57,12 @@ func.func @multitile_sizes_static_gen(
 
 // -----
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    transform.structured.multitile_sizes %0 { target_size = 3, divisor = 2, dimension = 0 } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      transform.structured.multitile_sizes %0 { target_size = 3, divisor = 2, dimension = 0 } : (!transform.any_op) -> !transform.any_op
+      transform.yield
+  }
 }
 
 // CHECK: #[[$MAP_A:.+]] = affine_map<()[s0] -> ([[A_IMPL:s0 floordiv 2]])>
@@ -94,12 +100,14 @@ func.func @multitile_sizes_dynamic(
 
 // -----
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    // expected-error @below {{cannot compute parametric tile sizes for dynamically shaped payload op}}
-    transform.structured.multitile_sizes %0 { target_size = 3, divisor = 2, dimension = 0 }
-      : (!transform.any_op) -> !transform.param<i64>
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      // expected-error @below {{cannot compute parametric tile sizes for dynamically shaped payload op}}
+      transform.structured.multitile_sizes %0 { target_size = 3, divisor = 2, dimension = 0 }
+        : (!transform.any_op) -> !transform.param<i64>
+      transform.yield
+  }
 }
 
 func.func @multitile_sizes_dynamic_gen(
diff --git a/mlir/test/Dialect/Linalg/transform-op-pack.mlir b/mlir/test/Dialect/Linalg/transform-op-pack.mlir
index a8502d211cf80a0..cf6339ce3de82e4 100644
--- a/mlir/test/Dialect/Linalg/transform-op-pack.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-pack.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -test-transform-dialect-interpreter -split-input-file -verify-diagnostics -allow-unregistered-dialect %s | FileCheck %s
+// RUN: mlir-opt -transform-interpreter -split-input-file -verify-diagnostics -allow-unregistered-dialect %s | FileCheck %s
 
 #map = affine_map<(d0, d1) -> (d0, d1)>
 #map1 = affine_map<(d0, d1) -> (d0)>
@@ -15,10 +15,10 @@
 //   CHECK-SAME:   %[[T1:.+]]: tensor<3xf16>
 func.func @reduction_2d_static(%t0: tensor<3x7xf16>, %t1: tensor<3xf16>) -> tensor<3xf16> {
   //      CHECK:  %[[EMPTY:.*]] = tensor.empty() : tensor<3x2x4xf16>
-  //      CHECK: %[[PACKED:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16) 
+  //      CHECK: %[[PACKED:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16)
   // CHECK-SAME:   inner_dims_pos = [1] inner_tiles = [4] into %[[EMPTY]] : tensor<3x7xf16> -> tensor<3x2x4xf16>
   //  CHECK-NOT: tensor.pack
-  //      CHECK: linalg.generic 
+  //      CHECK: linalg.generic
   // CHECK-SAME:   indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]]]
   // CHECK-SAME:   iterator_types = ["parallel", "reduction", "reduction"]
   // CHECK-SAME:   ins(%{{.*}} : tensor<3x2x4xf16>)
@@ -33,11 +33,13 @@ func.func @reduction_2d_static(%t0: tensor<3x7xf16>, %t1: tensor<3xf16>) -> tens
   return %2 : tensor<3xf16>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.pack %0 packed_sizes = [0, 4]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.pack %0 packed_sizes = [0, 4]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+    transform.yield
+  }
 }
 
 // -----
@@ -57,10 +59,10 @@ transform.sequence failures(propagate) {
 //   CHECK-SAME:   %[[T1:.+]]: tensor<3xf16>
 func.func @col_reduction_2d_static(%t0: tensor<7x3xf16>, %t1: tensor<3xf16>) -> tensor<3xf16> {
   //      CHECK:  %[[EMPTY:.*]] = tensor.empty() : tensor<3x2x4xf16>
-  //      CHECK: %[[PACKED:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16) 
+  //      CHECK: %[[PACKED:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16)
   // CHECK-SAME:   outer_dims_perm = [1, 0] inner_dims_pos = [0] inner_tiles = [4] into %[[EMPTY]] : tensor<7x3xf16> -> tensor<3x2x4xf16>
   //  CHECK-NOT: tensor.pack
-  //      CHECK: linalg.generic 
+  //      CHECK: linalg.generic
   // CHECK-SAME:   indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]]]
   // CHECK-SAME:   iterator_types = ["reduction", "parallel", "reduction"]
   // CHECK-SAME:   ins(%{{.*}} : tensor<3x2x4xf16>)
@@ -75,18 +77,20 @@ func.func @col_reduction_2d_static(%t0: tensor<7x3xf16>, %t1: tensor<3xf16>) ->
   return %2 : tensor<3xf16>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.pack %0 packed_sizes = [4, 0]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
-  %pack = transform.get_producer_of_operand %1[0] 
-    : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.pack">)
-  %2, %pack_2, %empty_unpack_2 = 
-    transform.structured.pack_transpose %pack with_compute_op(%1) 
-    outer_perm = [1, 0]
-     : (!transform.op<"tensor.pack">, !transform.op<"linalg.generic">) 
-    -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.pack %0 packed_sizes = [4, 0]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+    %pack = transform.get_producer_of_operand %1[0]
+      : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.pack">)
+    %2, %pack_2, %empty_unpack_2 =
+      transform.structured.pack_transpose %pack with_compute_op(%1)
+      outer_perm = [1, 0]
+       : (!transform.op<"tensor.pack">, !transform.op<"linalg.generic">)
+      -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -112,10 +116,10 @@ func.func @reduction_2d_dynamic(%t0: tensor<?x?xf16>, %t1: tensor<?xf16>) -> ten
   //  CHECK-DAG:     %[[D1:.*]] = tensor.dim %[[T0]], %[[C1]] : tensor<?x?xf16>
   //      CHECK:   %[[D1B4:.*]] = affine.apply #[[$DIV4]]()[%[[D1]]]
   //      CHECK:  %[[EMPTY:.*]] = tensor.empty(%[[D0]], %[[D1B4]]) : tensor<?x?x4xf16>
-  //      CHECK: %[[PACKED:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16) 
+  //      CHECK: %[[PACKED:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16)
   // CHECK-SAME:   inner_dims_pos = [1] inner_tiles = [4] into %[[EMPTY]] : tensor<?x?xf16> -> tensor<?x?x4xf16>
   //  CHECK-NOT: tensor.pack
-  //      CHECK: linalg.generic 
+  //      CHECK: linalg.generic
   // CHECK-SAME:   indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]]]
   // CHECK-SAME:   iterator_types = ["parallel", "reduction", "reduction"]
   // CHECK-SAME:   ins(%{{.*}} : tensor<?x?x4xf16>)
@@ -130,11 +134,13 @@ func.func @reduction_2d_dynamic(%t0: tensor<?x?xf16>, %t1: tensor<?xf16>) -> ten
   return %2 : tensor<?xf16>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.pack %0 packed_sizes = [0, 4]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.pack %0 packed_sizes = [0, 4]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+    transform.yield
+  }
 }
 
 
@@ -156,12 +162,12 @@ transform.sequence failures(propagate) {
 //   CHECK-SAME:   %[[T0:.+]]: tensor<?x?xf16>,
 //   CHECK-SAME:   %[[T1:.+]]: tensor<?xf16>
 func.func @reduction_2d_dynamic(%t0: tensor<?x?xf16>, %t1: tensor<?xf16>) -> tensor<?xf16> {
-  //      CHECK: %[[PACKED_0:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16) 
+  //      CHECK: %[[PACKED_0:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16)
   // CHECK-SAME:   inner_dims_pos = [0, 1] inner_tiles = [3, 4] into %{{.*}} : tensor<?x?xf16> -> tensor<?x?x3x4xf16>
-  //      CHECK: %[[PACKED_1:.*]] = tensor.pack %[[T1]] padding_value(%{{.*}} : f16) 
+  //      CHECK: %[[PACKED_1:.*]] = tensor.pack %[[T1]] padding_value(%{{.*}} : f16)
   // CHECK-SAME:   inner_dims_pos = [0] inner_tiles = [3] into %{{.*}} : tensor<?xf16> -> tensor<?x3xf16>
   //  CHECK-NOT: tensor.pack
-  //      CHECK: linalg.generic 
+  //      CHECK: linalg.generic
   // CHECK-SAME:   indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]]]
   // CHECK-SAME:   iterator_types = ["parallel", "reduction", "parallel", "reduction"]
   // CHECK-SAME:   ins(%{{.*}} : tensor<?x?x3x4xf16>)
@@ -176,11 +182,13 @@ func.func @reduction_2d_dynamic(%t0: tensor<?x?xf16>, %t1: tensor<?xf16>) -> ten
   return %2 : tensor<?xf16>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.pack %0 packed_sizes = [3, 4]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.pack %0 packed_sizes = [3, 4]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+    transform.yield
+  }
 }
 
 // -----
@@ -207,7 +215,7 @@ func.func @matmul(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
   // CHECK-SAME:   : tensor<?x?xf32> -> tensor<?x?x3x2xf32>
 
   //      CHECK: linalg.generic {indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]], #[[$PACKED_MAP_2]]]
-  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} 
+  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]}
   // CHECK-SAME:  ins(%{{.*}} : tensor<?x?x2x4xf32>, tensor<?x?x3x4xf32>)
   // CHECK-SAME: outs(%{{.*}} : tensor<?x?x3x2xf32>)
   %0 = linalg.matmul  ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
@@ -219,20 +227,22 @@ func.func @matmul(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    //                                            M  N  K
-    %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      //                                            M  N  K
+      %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
 
-    %unpack = transform.get_consumers_of_result %1[0] 
-      : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.unpack">)
-    %2, %pack_2, %unpack_2 = 
-      transform.structured.pack_transpose %unpack with_compute_op(%1) 
-      outer_perm = [1, 0] inner_perm = [1, 0]
-      : (!transform.op<"tensor.unpack">, !transform.op<"linalg.generic">) 
-      -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.op<"tensor.unpack">)
+      %unpack = transform.get_consumers_of_result %1[0]
+        : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.unpack">)
+      %2, %pack_2, %unpack_2 =
+        transform.structured.pack_transpose %unpack with_compute_op(%1)
+        outer_perm = [1, 0] inner_perm = [1, 0]
+        : (!transform.op<"tensor.unpack">, !transform.op<"linalg.generic">)
+        -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.op<"tensor.unpack">)
+      transform.yield
+  }
 }
 
 // -----
@@ -256,7 +266,7 @@ func.func @conv_2d_nchw_fchw(%i: tensor<14x512x28x28xf32>, %f: tensor<1024x512x1
   //      CHECK: %[[PACK_INPUT:.*]] = tensor.pack %{{.*}} inner_dims_pos = [1] inner_tiles = [4]
   // CHECK-SAME:   : tensor<14x1024x28x28xf32> -> tensor<14x256x28x28x4xf32>
   //      CHECK: linalg.generic {indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]], #[[$PACKED_MAP_2]]]
-  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction", "parallel", "reduction"]} 
+  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction", "parallel", "reduction"]}
   // CHECK-SAME:  ins(%{{.*}} : tensor<14x64x28x28x8xf32>, tensor<256x64x1x1x4x8xf32>)
   // CHECK-SAME: outs(%{{.*}} : tensor<14x256x28x28x4xf32>)
   %0 = linalg.conv_2d_nchw_fchw ins(%i, %f: tensor<14x512x28x28xf32>, tensor<1024x512x1x1xf32>)
@@ -267,12 +277,14 @@ func.func @conv_2d_nchw_fchw(%i: tensor<14x512x28x28xf32>, %f: tensor<1024x512x1
   return %0: tensor<14x1024x28x28xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
-  //                                            N  F  H  W  C KH KW
-  %1 = transform.structured.pack %0 packed_sizes = [0, 4, 0, 0, 8, 0, 0]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+    //                                            N  F  H  W  C KH KW
+    %1 = transform.structured.pack %0 packed_sizes = [0, 4, 0, 0, 8, 0, 0]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+    transform.yield
+  }
 }
 
 // -----
@@ -287,7 +299,7 @@ transform.sequence failures(propagate) {
 //  CHECK-SAME:   %[[FILTER:.+]]: tensor<1x?x?x?xf32>
 //  CHECK-SAME:   %[[INIT:.+]]: tensor<?x1x?x?xf32>
 func.func @conv_2d_nhwc_hwcf(%input: tensor<?x1x?x?xf32>, %filter: tensor<1x?x?x?xf32>, %init: tensor<?x1x?x?xf32>) -> tensor<?x1x?x?xf32> {
-  
+
   //      CHECK: %[[PACK_INPUT:.*]] = tensor.pack %{{.*}} inner_dims_pos = [3] inner_tiles = [6]
   // CHECK-SAME:   : tensor<?x1x?x?xf32> -> tensor<?x1x?x?x6xf32>
   //      CHECK: %[[PACK_FILTER:.*]] = tensor.pack %{{.*}} inner_dims_pos = [3, 2] inner_tiles = [4, 6]
@@ -296,24 +308,26 @@ func.func @conv_2d_nhwc_hwcf(%input: tensor<?x1x?x?xf32>, %filter: tensor<1x?x?x
   // CHECK-SAME:   : tensor<?x1x?x?xf32> -> tensor<?x1x?x?x4xf32>
 
   //      CHECK: linalg.generic {indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]], #[[$PACKED_MAP_2]]]
-  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction", "parallel", "reduction"]} 
+  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction", "parallel", "reduction"]}
   // CHECK-SAME:  ins(%{{.*}} : tensor<?x1x?x?x6xf32>, tensor<1x?x?x?x4x6xf32>)
   // CHECK-SAME: outs(%{{.*}} : tensor<?x1x?x?x4xf32>)
   %0 = linalg.conv_2d_nhwc_hwcf
      ins (%input, %filter: tensor<?x1x?x?xf32>, tensor<1x?x?x?xf32>)
     outs (%init: tensor<?x1x?x?xf32>) -> tensor<?x1x?x?xf32>
-  
+
   //      CHECK: tensor.unpack %{{.*}} inner_dims_pos = [3] inner_tiles = [4]
   // CHECK-SAME:   : tensor<?x1x?x?x4xf32> -> tensor<?x1x?x?xf32>
   return %0 : tensor<?x1x?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
-  //                                            N  H  W  F KH KW  C
-  %1 = transform.structured.pack %0 packed_sizes = [0, 0, 0, 4, 0, 0, 6]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+    //                                            N  H  W  F KH KW  C
+    %1 = transform.structured.pack %0 packed_sizes = [0, 0, 0, 4, 0, 0, 6]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+    transform.yield
+  }
 }
 
 // -----
@@ -342,7 +356,7 @@ func.func @matmul_dynamic_pack_size(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C
   //      CHECK: %[[PACK_C:.*]] = tensor.pack %[[C]] {{.*}} inner_dims_pos = [1] inner_tiles = [%[[TS]]]
   // CHECK-SAME:   : tensor<?x?xf32> -> tensor<?x?x?xf32>
   //      CHECK: linalg.generic {indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]], #[[$PACKED_MAP_2]]]
-  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "reduction", "parallel", "reduction"]} 
+  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "reduction", "parallel", "reduction"]}
   // CHECK-SAME:  ins(%{{.*}} : tensor<?x?x?xf32>, tensor<?x?x?x?xf32>)
   // CHECK-SAME: outs(%{{.*}} : tensor<?x?x?xf32>)
   %0 = linalg.matmul  ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
@@ -354,12 +368,14 @@ func.func @matmul_dynamic_pack_size(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %sz = transform.structured.match ops{["some_tile_size"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.pack %0 packed_sizes = [0, %sz : !transform.any_op, %sz : !transform.any_op] 
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %sz = transform.structured.match ops{["some_tile_size"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.pack %0 packed_sizes = [0, %sz : !transform.any_op, %sz : !transform.any_op]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+      transform.yield
+  }
 }
 
 // -----
@@ -371,13 +387,15 @@ func.func @conv_cant_pack(%i: tensor<14x512x28x28xf32>, %f: tensor<1024x512x1x1x
   return %0: tensor<14x1024x28x28xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
-  //                                                N  F  H  W  C KH KW
-  // expected-error @below {{data tiling failed}}
-  %1 = transform.structured.pack %0 packed_sizes = [0, 0, 4, 0, 0, 0, 0]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+    //                                                N  F  H  W  C KH KW
+    // expected-error @below {{data tiling failed}}
+    %1 = transform.structured.pack %0 packed_sizes = [0, 0, 4, 0, 0, 0, 0]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+    transform.yield
+  }
 }
 
 // -----
@@ -393,12 +411,14 @@ func.func @matmul(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
   return %0, %1 : tensor<?x?xf32>, tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    // expected-error @below {{requires target to map to exactly 1 LinalgOp (got 2)}}
-    %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4] 
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      // expected-error @below {{requires target to map to exactly 1 LinalgOp (got 2)}}
+      %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+      transform.yield
+  }
 }
 
 
@@ -412,12 +432,14 @@ func.func @matmul(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    // expected-error @below {{requires number of packed sizes match the number of loops (2 vs 3)}}
-    %1 = transform.structured.pack %0 packed_sizes = [2, 3] 
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      // expected-error @below {{requires number of packed sizes match the number of loops (2 vs 3)}}
+      %1 = transform.structured.pack %0 packed_sizes = [2, 3]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+      transform.yield
+  }
 }
 
 // -----
@@ -429,15 +451,17 @@ func.func @no_single_packing_op(%source: tensor<128x256xf32>, %dest: tensor<4x16
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-      // expected-error @below {{requires target to map to exactly 1 packing op and 1 packed op (got 2 and 1)}}
-    transform.structured.pack_transpose %0 with_compute_op(%1) 
-    inner_perm = [0]
-      : (!transform.any_op, !transform.any_op) 
-      -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+        // expected-error @below {{requires target to map to exactly 1 packing op and 1 packed op (got 2 and 1)}}
+      transform.structured.pack_transpose %0 with_compute_op(%1)
+      inner_perm = [0]
+        : (!transform.any_op, !transform.any_op)
+        -> (!transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -448,15 +472,17 @@ func.func @no_single_pack_unpack(%source: tensor<128x256xf32>, %dest: tensor<4x1
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.match ops{["tensor.empty"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-      // expected-error @below {{requires target to map to a tensor.pack or tensor.unpack}}
-    transform.structured.pack_transpose %0 with_compute_op(%1) 
-    inner_perm = [0]
-      : (!transform.any_op, !transform.any_op) 
-      -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.match ops{["tensor.empty"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+        // expected-error @below {{requires target to map to a tensor.pack or tensor.unpack}}
+      transform.structured.pack_transpose %0 with_compute_op(%1)
+      inner_perm = [0]
+        : (!transform.any_op, !transform.any_op)
+        -> (!transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -467,15 +493,17 @@ func.func @no_linalg_target(%source: tensor<128x256xf32>, %dest: tensor<4x16x32x
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-      // expected-error @below {{requires a LinalgOp target}}
-    transform.structured.pack_transpose %0 with_compute_op(%1) 
-    inner_perm = [0]
-      : (!transform.any_op, !transform.any_op) 
-      -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+        // expected-error @below {{requires a LinalgOp target}}
+      transform.structured.pack_transpose %0 with_compute_op(%1)
+      inner_perm = [0]
+        : (!transform.any_op, !transform.any_op)
+        -> (!transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -488,15 +516,17 @@ func.func @no_single_use_by_linalg(%source: tensor<128x256xf32>, %dest: tensor<4
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-      // expected-error @below {{not a single use by the LinalgOp target}}
-    transform.structured.pack_transpose %0 with_compute_op(%1) 
-    inner_perm = [0]
-      : (!transform.any_op, !transform.any_op) 
-      -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+        // expected-error @below {{not a single use by the LinalgOp target}}
+      transform.structured.pack_transpose %0 with_compute_op(%1)
+      inner_perm = [0]
+        : (!transform.any_op, !transform.any_op)
+        -> (!transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -510,15 +540,17 @@ func.func @not_produced_by_linalg(%source: tensor<128x256xf32>, %dest: tensor<4x
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-      // expected-error @below {{not produced by the LinalgOp target}}
-    transform.structured.pack_transpose %0 with_compute_op(%1) 
-    inner_perm = [0]
-      : (!transform.any_op, !transform.any_op) 
-      -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+        // expected-error @below {{not produced by the LinalgOp target}}
+      transform.structured.pack_transpose %0 with_compute_op(%1)
+      inner_perm = [0]
+        : (!transform.any_op, !transform.any_op)
+        -> (!transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -531,15 +563,17 @@ func.func @no_matching_pack(%source: tensor<16xf32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-      // expected-error @below {{could not find matching pack op}}
-    transform.structured.pack_transpose %0 with_compute_op(%1) 
-    inner_perm = [0]
-      : (!transform.any_op, !transform.any_op) 
-      -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+        // expected-error @below {{could not find matching pack op}}
+      transform.structured.pack_transpose %0 with_compute_op(%1)
+      inner_perm = [0]
+        : (!transform.any_op, !transform.any_op)
+        -> (!transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -552,20 +586,22 @@ func.func @invalid_outer_perm(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tens
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
 
-    %unpack = transform.get_consumers_of_result %1[0] 
-      : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.unpack">)
-    %2, %pack_2, %unpack_2 = 
-      // expected-error @below {{invalid outer_perm}}
-      transform.structured.pack_transpose %unpack with_compute_op(%1) 
-      outer_perm = [1]
-      : (!transform.op<"tensor.unpack">, !transform.op<"linalg.generic">) 
-      -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.op<"tensor.unpack">)
+      %unpack = transform.get_consumers_of_result %1[0]
+        : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.unpack">)
+      %2, %pack_2, %unpack_2 =
+        // expected-error @below {{invalid outer_perm}}
+        transform.structured.pack_transpose %unpack with_compute_op(%1)
+        outer_perm = [1]
+        : (!transform.op<"tensor.unpack">, !transform.op<"linalg.generic">)
+        -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.op<"tensor.unpack">)
+      transform.yield
+  }
 }
 
 // -----
@@ -578,20 +614,22 @@ func.func @invalid_inner_perm(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tens
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
 
-    %unpack = transform.get_consumers_of_result %1[0] 
-      : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.unpack">)
-    %2, %pack_2, %unpack_2 = 
-      // expected-error @below {{invalid inner_perm}}
-      transform.structured.pack_transpose %unpack with_compute_op(%1) 
-      inner_perm = [1]
-      : (!transform.op<"tensor.unpack">, !transform.op<"linalg.generic">) 
-      -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.op<"tensor.unpack">)
+      %unpack = transform.get_consumers_of_result %1[0]
+        : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.unpack">)
+      %2, %pack_2, %unpack_2 =
+        // expected-error @below {{invalid inner_perm}}
+        transform.structured.pack_transpose %unpack with_compute_op(%1)
+        inner_perm = [1]
+        : (!transform.op<"tensor.unpack">, !transform.op<"linalg.generic">)
+        -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.op<"tensor.unpack">)
+      transform.yield
+  }
 }
 
 // -----
@@ -605,24 +643,26 @@ func.func @no_padding_on_packs(%A: tensor<32x32xf32>, %B: tensor<32x32xf32>, %C:
 }
 
 // CHECK-LABEL: no_padding_on_packs
-// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [4, 8] 
+// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [4, 8]
 // CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<8x4x4x8xf32>
-// CHECK: tensor.pack %{{.+}} outer_dims_perm = [1, 0] 
-// CHECK-SAME:  inner_dims_pos = [0, 1] inner_tiles = [8, 8] 
+// CHECK: tensor.pack %{{.+}} outer_dims_perm = [1, 0]
+// CHECK-SAME:  inner_dims_pos = [0, 1] inner_tiles = [8, 8]
 // CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<4x4x8x8xf32>
-// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [4, 8] 
+// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [4, 8]
 // CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<8x4x4x8xf32>
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1 = transform.structured.pack %0 packed_sizes = [4, 8, 8]
-      : (!transform.any_op) -> (!transform.op<"linalg.generic">)
-    %pack = transform.get_producer_of_operand %1[1]
-    : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.pack">)
-    %2, %pack_2, %empty_unpack_2 =
-    transform.structured.pack_transpose %pack with_compute_op(%1)
-    outer_perm = [1, 0] inner_perm = [1, 0]
-     : (!transform.op<"tensor.pack">, !transform.op<"linalg.generic">)
-    -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.any_op) 
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1 = transform.structured.pack %0 packed_sizes = [4, 8, 8]
+        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
+      %pack = transform.get_producer_of_operand %1[1]
+      : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.pack">)
+      %2, %pack_2, %empty_unpack_2 =
+      transform.structured.pack_transpose %pack with_compute_op(%1)
+      outer_perm = [1, 0] inner_perm = [1, 0]
+       : (!transform.op<"tensor.pack">, !transform.op<"linalg.generic">)
+      -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.any_op)
+      transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-pad.mlir b/mlir/test/Dialect/Linalg/transform-op-pad.mlir
index 5c5d162b7c16f0a..6bca6c1fd6bf12e 100644
--- a/mlir/test/Dialect/Linalg/transform-op-pad.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-pad.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter -split-input-file -verify-diagnostics %s | FileCheck %s
+// RUN: mlir-opt --transform-interpreter -split-input-file -verify-diagnostics %s | FileCheck %s
 
 #map = affine_map<()[s0] -> (-s0 + 12, 7)>
 
@@ -33,16 +33,18 @@ func.func @static_sizes_output_divisible(%arg0: tensor<24x12xf32>,
   func.return %5 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.op<"bufferization.materialize_in_destination">)
-  // expected-remark @below {{1}}
-  test_print_number_of_associated_payload_ir_ops %copy_back : !transform.op<"bufferization.materialize_in_destination">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pack_paddings=[1, 1, 0]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.op<"bufferization.materialize_in_destination">)
+    // expected-remark @below {{1}}
+    transform.test_print_number_of_associated_payload_ir_ops %copy_back : !transform.op<"bufferization.materialize_in_destination">
+    transform.yield
+  }
 }
 
 // -----
@@ -67,15 +69,17 @@ func.func @pad_to_multiple(%arg0: tensor<24x12xf32>,
   func.return %5 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pad_to_multiple_of=[2, 2, 1],
-    pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pad_to_multiple_of=[2, 2, 1],
+      pack_paddings=[1, 1, 0]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -109,14 +113,16 @@ func.func @static_sizes_output_divisible_on_empty_op(%arg0: tensor<24x12xf32>,
   func.return %5 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pack_paddings=[1, 1, 0]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -129,15 +135,17 @@ func.func @pad(%arg0: tensor<24x12xf32>,
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{op expects a padding value of type 'f32', got 0 : i32}}
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=[0: i32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{op expects a padding value of type 'f32', got 0 : i32}}
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0: i32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pack_paddings=[1, 1, 0]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -150,15 +158,17 @@ func.func @pad(%arg0: tensor<24x12xf32>,
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{expects a padding that parses to 'f32', got "{foo}"}}
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=["{foo}", 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{expects a padding that parses to 'f32', got "{foo}"}}
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=["{foo}", 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pack_paddings=[1, 1, 0]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -173,16 +183,18 @@ func.func @pad(%arg0: tensor<24x12xf32>,
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(suppress) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // This error is silenceable and is not reported by this transform
-  //   {{transform.structured.pad failed to apply}}
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // This error is silenceable and is not reported by this transform
+    //   {{transform.structured.pad failed to apply}}
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pack_paddings=[1, 1, 0]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -230,14 +242,16 @@ func.func @outs_not_produced_by_empty_or_extract_slice(%a : tensor<128x2044xf32>
   return %10 : tensor<128x128xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pack_paddings=[1, 1, 1]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pack_paddings=[1, 1, 1]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -280,12 +294,14 @@ func.func @pack_everything(%arg0: tensor<24x12xf32>,
   func.return %5 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %padded, %pad, %copy_back = transform.structured.pad %0 {
-    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
-    padding_dimensions=[0, 1, 2],
-    pack_paddings=[1, 1, 1]
-  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0, 1, 2],
+      pack_paddings=[1, 1, 1]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-replace.mlir b/mlir/test/Dialect/Linalg/transform-op-replace.mlir
index 3c6a4eb605309d3..2801522e81ac2ce 100644
--- a/mlir/test/Dialect/Linalg/transform-op-replace.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-replace.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -test-transform-dialect-interpreter %s -allow-unregistered-dialect -verify-diagnostics --split-input-file | FileCheck %s
+// RUN: mlir-opt -transform-interpreter %s -allow-unregistered-dialect -verify-diagnostics --split-input-file | FileCheck %s
 
 // CHECK: func.func @foo() {
 // CHECK:   "dummy_op"() : () -> ()
@@ -8,14 +8,16 @@ func.func @bar() {
   "another_op"() : () -> ()
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.replace %0 {
-    func.func @foo() {
-      "dummy_op"() : () -> ()
-    }
-  } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.replace %0 {
+      func.func @foo() {
+        "dummy_op"() : () -> ()
+      }
+    } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -24,13 +26,15 @@ func.func @bar(%arg0: i1) {
   "another_op"(%arg0) : (i1) -> ()
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["another_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @+1 {{expected target without operands}}
-  transform.structured.replace %0 {
-    "dummy_op"() : () -> ()
-  } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["another_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @+1 {{expected target without operands}}
+    transform.structured.replace %0 {
+      "dummy_op"() : () -> ()
+    } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -39,12 +43,14 @@ func.func @bar() {
   "another_op"() : () -> ()
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["another_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.replace %0 {
-  ^bb0(%a: i1):
-    // expected-error @+1 {{expected replacement without operands}}
-    "dummy_op"(%a) : (i1) -> ()
-  } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["another_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.replace %0 {
+    ^bb0(%a: i1):
+      // expected-error @+1 {{expected replacement without operands}}
+      "dummy_op"(%a) : (i1) -> ()
+    } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-rewrite-in-destination-passing-style.mlir b/mlir/test/Dialect/Linalg/transform-op-rewrite-in-destination-passing-style.mlir
index 723b9ceba0e326d..fb37d78b50ce47c 100644
--- a/mlir/test/Dialect/Linalg/transform-op-rewrite-in-destination-passing-style.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-rewrite-in-destination-passing-style.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt  -test-transform-dialect-interpreter --split-input-file -canonicalize %s | FileCheck %s
+// RUN: mlir-opt  -transform-interpreter --split-input-file -canonicalize %s | FileCheck %s
 
 // CHECK-LABEL: func @tensor_from_elements_0d(
 //  CHECK-SAME:     %[[arg0:.*]]: index
@@ -10,12 +10,14 @@ func.func @tensor_from_elements_0d(%arg0: index) -> tensor<index> {
   return %0 : tensor<index>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.from_elements"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.rewrite_in_destination_passing_style %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.from_elements"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.rewrite_in_destination_passing_style %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -33,12 +35,14 @@ func.func @tensor_from_elements_1d(%arg0: index, %arg1: index) -> tensor<2xindex
   return %0 : tensor<2xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.from_elements"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.rewrite_in_destination_passing_style %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.from_elements"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.rewrite_in_destination_passing_style %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -62,12 +66,14 @@ func.func @tensor_from_elements_2d(%arg0: index, %arg1: index) -> tensor<3x2xind
   return %0 : tensor<3x2xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.from_elements"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.rewrite_in_destination_passing_style %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.from_elements"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.rewrite_in_destination_passing_style %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -94,12 +100,14 @@ func.func @tensor_generate(%s1: index, %s2: index) -> tensor<?x?xindex> {
   return %0 : tensor<?x?xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.generate"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.rewrite_in_destination_passing_style %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.generate"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.rewrite_in_destination_passing_style %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -135,12 +143,14 @@ func.func @tensor_pad(%t1: tensor<?x10xindex>, %l2: index, %h1: index,
   return %0 : tensor<?x?xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.rewrite_in_destination_passing_style %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.rewrite_in_destination_passing_style %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -169,12 +179,14 @@ func.func @tensor_pad_constant(%t1: tensor<?x10xindex>, %l2: index, %h1: index,
   return %0 : tensor<?x?xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.rewrite_in_destination_passing_style %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.rewrite_in_destination_passing_style %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -201,12 +213,14 @@ func.func @tensor_pad_invariant(%t1: tensor<?x10xindex>, %l2: index, %h1: index,
   return %0 : tensor<?x?xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.rewrite_in_destination_passing_style %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.rewrite_in_destination_passing_style %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -229,10 +243,12 @@ func.func @tensor_pad_nofold(%t1: tensor<?x?xindex>, %padding: index)
   return %0: tensor<?x?xindex>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.rewrite_in_destination_passing_style %0
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.rewrite_in_destination_passing_style %0
+      : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-scalarize.mlir b/mlir/test/Dialect/Linalg/transform-op-scalarize.mlir
index 402d0e7163ea913..7d642c8995f055e 100644
--- a/mlir/test/Dialect/Linalg/transform-op-scalarize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-scalarize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -test-transform-dialect-interpreter %s | FileCheck %s
+// RUN: mlir-opt -transform-interpreter %s | FileCheck %s
 
 func.func @scalarize(%arg0: tensor<24x12xf32>,
                      %arg1: tensor<12x25xf32>,
@@ -18,9 +18,11 @@ func.func @scalarize(%arg0: tensor<24x12xf32>,
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops = transform.structured.tile_using_for %0 [10, 0, 0] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %2 = transform.structured.scalarize %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops = transform.structured.tile_using_for %0 [10, 0, 0] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %2 = transform.structured.scalarize %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-split-reduction-by-scaling.mlir b/mlir/test/Dialect/Linalg/transform-op-split-reduction-by-scaling.mlir
index 719e0daccfbfa5d..bc5fffbdca44bcf 100644
--- a/mlir/test/Dialect/Linalg/transform-op-split-reduction-by-scaling.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-split-reduction-by-scaling.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter %s | FileCheck %s
+// RUN: mlir-opt --transform-interpreter %s | FileCheck %s
 
 // CHECK-LABEL: func.func @matmul_split
 func.func @matmul_split(%A : tensor<?x256xf32>, %B: tensor<256x32xf32>, %C: tensor<?x32xf32>) -> tensor<?x32xf32> {
@@ -18,10 +18,12 @@ func.func @matmul_split(%A : tensor<?x256xf32>, %B: tensor<256x32xf32>, %C: tens
   return %0: tensor<?x32xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:4 = transform.structured.split_reduction %0
-    { split_factor = 4, insert_split_dimension = 2, use_scaling_algorithm, use_alloc}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:4 = transform.structured.split_reduction %0
+      { split_factor = 4, insert_split_dimension = 2, use_scaling_algorithm, use_alloc}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-split-reduction.mlir b/mlir/test/Dialect/Linalg/transform-op-split-reduction.mlir
index 7b16546fb189084..af08354b1ee4a73 100644
--- a/mlir/test/Dialect/Linalg/transform-op-split-reduction.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-split-reduction.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --split-input-file --test-transform-dialect-interpreter %s | FileCheck %s
+// RUN: mlir-opt --split-input-file --transform-interpreter %s | FileCheck %s
 
 func.func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
   %0 = linalg.matmul ins(%A, %B: tensor<16x256xf32>, tensor<256x32xf32>)
@@ -31,11 +31,13 @@ func.func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: ten
 //      CHECK: } -> tensor<16x32xf32>
 //      CHECK: return %[[R]] : tensor<16x32xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -80,11 +82,13 @@ func.func @generic_split_1d(%arg0: tensor<32xf32>, %arg1: tensor<f32>, %out: ten
 //      CHECK: } -> tensor<f32>
 //      CHECK: return %[[R]] : tensor<f32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 0}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 0}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -132,11 +136,13 @@ func.func @generic_split_3d(%input: tensor<32x2xf32>, %input_2: tensor<5x32xf32>
 //      CHECK:  } -> tensor<5x2xf32>
 //      CHECK: return %[[R]] : tensor<5x2xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -188,11 +194,13 @@ func.func @generic_split_3d_ninf(%input: tensor<32x2xf32>, %input_2: tensor<5x32
 //      CHECK:  } -> tensor<5x2xf32>
 //      CHECK: return %[[R]] : tensor<5x2xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -228,11 +236,13 @@ func.func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: ten
 //      CHECK: } -> tensor<16x32xf32>
 //      CHECK: return %[[R]] : tensor<16x32xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2, inner_parallel}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2, inner_parallel}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -277,11 +287,13 @@ func.func @generic_split_1d(%arg0: tensor<32xf32>, %arg1: tensor<f32>, %out: ten
 //      CHECK: } -> tensor<f32>
 //      CHECK: return %[[R]] : tensor<f32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 0, inner_parallel}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 0, inner_parallel}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -329,11 +341,13 @@ func.func @generic_split_3d(%input: tensor<32x2xf32>, %input_2: tensor<5x32xf32>
 //      CHECK:  } -> tensor<5x2xf32>
 //      CHECK: return %[[R]] : tensor<5x2xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2, inner_parallel}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2, inner_parallel}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
 
 // -----
@@ -385,9 +399,11 @@ func.func @generic_split_3d(%input: tensor<32x2xf32>, %input_2: tensor<5x32xf32>
 //      CHECK:  } -> tensor<5x2xf32>
 //      CHECK: return %[[R]] : tensor<5x2xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2, inner_parallel}
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:4 = transform.structured.split_reduction %0 { split_factor = 4, insert_split_dimension = 2, inner_parallel}
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-split.mlir b/mlir/test/Dialect/Linalg/transform-op-split.mlir
index fe97c7ed79a119d..566e517d69789d4 100644
--- a/mlir/test/Dialect/Linalg/transform-op-split.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-split.mlir
@@ -1,9 +1,11 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file -verify-diagnostics | FileCheck %s
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.split %0 after 42 { dimension = 0 } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.split %0 after 42 { dimension = 0 } : !transform.any_op
+    transform.yield
+  }
 }
 
 func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
@@ -48,10 +50,12 @@ func.func @one_d_static(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tenso
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.split %0 after 42 { dimension = 0 } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.split %0 after 42 { dimension = 0 } : !transform.any_op
+    transform.yield
+  }
 }
 
 func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
@@ -81,11 +85,13 @@ func.func @one_d_static_overflow(%arg0: tensor<10xf32>, %arg1: tensor<10xf32>) -
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.match ops{["func.call"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.split %0 after %1 { dimension = 0 } : !transform.any_op, !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.match ops{["func.call"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.split %0 after %1 { dimension = 0 } : !transform.any_op, !transform.any_op
+    transform.yield
+  }
 }
 
 func.func private @get_size() -> index
@@ -129,11 +135,13 @@ func.func @dynamic(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1:2 = transform.structured.split %0 after 4 { dimension = 0 } : !transform.any_op
-  %2:2 = transform.structured.split %1#1 after 16 { dimension = 1 } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1:2 = transform.structured.split %0 after 4 { dimension = 0 } : !transform.any_op
+    %2:2 = transform.structured.split %1#1 after 16 { dimension = 1 } : !transform.any_op
+    transform.yield
+  }
 }
 
 func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
@@ -186,20 +194,24 @@ func.func @two_d(%arg0: tensor<10x34xf32>,
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  // expected-error @below {{expects either a dynamic or a static split point to be provided}}
-  %0:2 = "transform.structured.split"(%arg1) { dimension = 1, static_split_point = -9223372036854775808 } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
+    // expected-error @below {{expects either a dynamic or a static split point to be provided}}
+    %0:2 = "transform.structured.split"(%arg1) { dimension = 1, static_split_point = -9223372036854775808 } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.match ops{["func.call"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{expected dynamic split point handle to point to a single-result index-typed op}}
-  transform.structured.split %0 after %1 { dimension = 0 } : !transform.any_op, !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.match ops{["func.call"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{expected dynamic split point handle to point to a single-result index-typed op}}
+    transform.structured.split %0 after %1 { dimension = 0 } : !transform.any_op, !transform.any_op
+    transform.yield
+  }
 }
 
 func.func private @get_size() -> i64
@@ -220,12 +232,14 @@ func.func @dynamic(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.match ops{["func.call"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{expected the dynamic split point handle to point to as many operations (0) as the target handle (1)}}
-  transform.structured.split %0 after %1 { dimension = 0 } : !transform.any_op, !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.match ops{["func.call"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{expected the dynamic split point handle to point to as many operations (0) as the target handle (1)}}
+    transform.structured.split %0 after %1 { dimension = 0 } : !transform.any_op, !transform.any_op
+    transform.yield
+  }
 }
 
 func.func private @get_size() -> i64
@@ -244,11 +258,13 @@ func.func @dynamic(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.return"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{only applies to structured ops}}
-  transform.structured.split %0 after 16 { dimension = 1 } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.return"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{only applies to structured ops}}
+    transform.structured.split %0 after 16 { dimension = 1 } : !transform.any_op
+    transform.yield
+  }
 }
 
 func.func @noop(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100xf32> {
@@ -258,11 +274,13 @@ func.func @noop(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100xf3
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{dimension 1 does not exist in target op}}
-  transform.structured.split %0 after 16 { dimension = 1 } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{dimension 1 does not exist in target op}}
+    transform.structured.split %0 after 16 { dimension = 1 } : !transform.any_op
+    transform.yield
+  }
 }
 
 func.func @one_d_static(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<100xf32> {
@@ -280,12 +298,14 @@ func.func @one_d_static(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tenso
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{splitting does not produce the second part for a subset of targets}}
-  // expected-note @below {{expected splitting to produce the second part of all or none of the targets}}
-  %1:2 = transform.structured.split %0 after 142 { dimension = 0 } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{splitting does not produce the second part for a subset of targets}}
+    // expected-note @below {{expected splitting to produce the second part of all or none of the targets}}
+    %1:2 = transform.structured.split %0 after 142 { dimension = 0 } : !transform.any_op
+    transform.yield
+  }
 }
 
 func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
diff --git a/mlir/test/Dialect/Linalg/transform-op-tile.mlir b/mlir/test/Dialect/Linalg/transform-op-tile.mlir
index 6366c80e578fe9e..ea8c5e6124796e0 100644
--- a/mlir/test/Dialect/Linalg/transform-op-tile.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-tile.mlir
@@ -1,9 +1,11 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter --mlir-print-local-scope --split-input-file --verify-diagnostics %s | FileCheck %s
+// RUN: mlir-opt --transform-interpreter --mlir-print-local-scope --split-input-file --verify-diagnostics %s | FileCheck %s
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:3 = transform.structured.tile_using_for %0 [4, 4, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.tile_using_for %0 [4, 4, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @tile_linalg_matmul(
@@ -36,11 +38,13 @@ func.func @tile_linalg_matmul(
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.match ops{["func.call"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %2, %loops:3 = transform.structured.tile_using_for %0 [%1, %1, 4] : (!transform.any_op, !transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.match ops{["func.call"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %2, %loops:3 = transform.structured.tile_using_for %0 [%1, %1, 4] : (!transform.any_op, !transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func private @get_dynamic_tile_size() -> index
@@ -76,15 +80,17 @@ func.func @tile_linalg_matmul_dynamic(
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-note @below {{for this parameter}}
-  %1 = transform.test_produce_param (0 : i64) : !transform.param<i64>
-  // expected-error @below {{expected as many parameter values (0) as target ops (2)}}
-  transform.structured.tile_using_for %0 [%1, %1, %1]
-    : (!transform.any_op, !transform.param<i64>, !transform.param<i64>, !transform.param<i64>)
-    -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-note @below {{for this parameter}}
+    %1 = transform.test_produce_param (0 : i64) : !transform.param<i64>
+    // expected-error @below {{expected as many parameter values (0) as target ops (2)}}
+    transform.structured.tile_using_for %0 [%1, %1, %1]
+      : (!transform.any_op, !transform.param<i64>, !transform.param<i64>, !transform.param<i64>)
+      -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func @tile_linalg_matmul(
@@ -101,15 +107,17 @@ func.func @tile_linalg_matmul(
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-note @below {{for this handle}}
-  %1 = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{expected as many dynamic size-producing operations (0) as target ops (2)}}
-  transform.structured.tile_using_for %0 [%1, %1, 1]
-    : (!transform.any_op, !transform.any_op, !transform.any_op)
-    -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-note @below {{for this handle}}
+    %1 = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{expected as many dynamic size-producing operations (0) as target ops (2)}}
+    transform.structured.tile_using_for %0 [%1, %1, 1]
+      : (!transform.any_op, !transform.any_op, !transform.any_op)
+      -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func @tile_linalg_matmul(
@@ -143,11 +151,13 @@ func.func @tile_tensor_pad(
   return %0 : tensor<20x40xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.tile_using_forall %0 tile_sizes[1, 1]
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.tile_using_forall %0 tile_sizes[1, 1]
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -181,11 +191,13 @@ module {
 // CHECK:             %[[SLICE_ARG2:.*]] = tensor.extract_slice %[[VAL]][%[[IV]]] [%[[SIZE]]] [1] : tensor<?xf32> to tensor<?xf32>
 // CHECK:             linalg.generic {indexing_maps = {{.*}}, iterator_types = ["parallel"]} ins(%[[SLICE_ARG0]], %[[SLICE_ARG1]] : tensor<?xf32>, tensor<?xf32>) outs(%[[SLICE_ARG2]] : tensor<?xf32>) {
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loop = transform.structured.tile_using_for %0 [[4]] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-}
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %1, %loop = transform.structured.tile_using_for %0 [[4]] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
+  }
 
 // -----
 
@@ -215,10 +227,12 @@ func.func @scalable_and_fixed_length_tile(
   return %0 : tensor<128x128xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:3 = transform.structured.tile_using_for %0 [4, 4, [4]] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.tile_using_for %0 [4, 4, [4]] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -231,9 +245,11 @@ func.func @too_many_tiles(%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>
   return %0 : tensor<128x128xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{too many tiles provided, expected at most 3 found 4}}
-  %1, %loops = transform.structured.tile_using_for %0 [1, 0, 0, 0] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{too many tiles provided, expected at most 3 found 4}}
+    %1, %loops = transform.structured.tile_using_for %0 [1, 0, 0, 0] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir b/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
index 43fea65ed7f30da..0d59dbba8940d4f 100644
--- a/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics | FileCheck %s
 
 // CHECK-LABEL: @vectorize_matmul
 // CHECK-SAME: %[[A:.*]]: tensor<24x12xf32>
@@ -16,11 +16,13 @@ func.func @vectorize_matmul(%arg0: tensor<24x12xf32>,
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -41,11 +43,13 @@ func.func @vectorize_matmul_memref(%arg0: memref<24x12xf32>,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -61,11 +65,13 @@ func.func @vectorize_copy_memref(%arg0: memref<100x100xf32>,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -107,11 +113,13 @@ func.func @vectorize_keep_pad(
   return %9 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -155,11 +163,13 @@ func.func @vectorize_pad(
   return %9 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 {vectorize_padding} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 {vectorize_padding} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -172,9 +182,11 @@ func.func @vectorize(%arg0: tensor<24x12xf32>,
   func.return %0 : tensor<24x25xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{op requires isolated-from-above targets}}
-  %2 = transform.structured.vectorize_children_and_apply_patterns %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{op requires isolated-from-above targets}}
+    %2 = transform.structured.vectorize_children_and_apply_patterns %0 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-pack-greedily.mlir b/mlir/test/Dialect/Linalg/transform-pack-greedily.mlir
index 63ce9c02afb0858..100692426ef44cc 100644
--- a/mlir/test/Dialect/Linalg/transform-pack-greedily.mlir
+++ b/mlir/test/Dialect/Linalg/transform-pack-greedily.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter --split-input-file | FileCheck %s
 
 !A_mk = tensor<1023x255xf32>
 !B_kn = tensor<255x127xf32>
@@ -13,20 +13,22 @@
 func.func @matmul_mk_kn_mn(%A : !A_mk, %B : !B_kn, %C : !C_mn) -> !C_mn {
   //      CHECK: linalg.generic
   // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
-  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]} 
+  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
   // CHECK-SAME:   ins(%{{.*}} : tensor<128x8x32x8xf32>, tensor<8x8x32x16xf32>)
   // CHECK-SAME:  outs(%{{.*}} : tensor<128x8x8x16xf32>)
   %0 = linalg.matmul ins(%A, %B : !A_mk, !B_kn) outs(%C : !C_mn) -> !C_mn
   return %0 : !C_mn
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %module_op 
-    : (!transform.any_op) -> !transform.op<"linalg.matmul">
-  transform.structured.pack_greedily %matmul 
-      matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
-    : (!transform.op<"linalg.matmul">) -> !transform.op<"linalg.generic">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"linalg.matmul">
+    transform.structured.pack_greedily %matmul
+        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
+      : (!transform.op<"linalg.matmul">) -> !transform.op<"linalg.generic">
+    transform.yield
+  }
 }
 
 // -----
@@ -54,7 +56,7 @@ transform.sequence failures(propagate) {
 func.func @matmul_mk_nk_nm(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
   //      CHECK: linalg.generic
   // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
-  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]} 
+  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
   // CHECK-SAME:   ins(%{{.*}} : tensor<128x8x32x8xf32>, tensor<8x8x32x16xf32>)
   // CHECK-SAME:  outs(%{{.*}} : tensor<8x128x8x16xf32>)
   %0 = linalg.generic #mkn_trait ins(%A, %B : !A_mk, !B_nk) outs(%C : !C_nm) {
@@ -66,12 +68,14 @@ func.func @matmul_mk_nk_nm(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
   return %0 : !C_nm
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
-  transform.structured.pack_greedily %generic
-      matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
-    : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
+    transform.structured.pack_greedily %generic
+        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
+      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+    transform.yield
+  }
 }
 
 // -----
@@ -99,7 +103,7 @@ transform.sequence failures(propagate) {
 func.func @matmul_mk_nk_nm_transposed(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
   //      CHECK: linalg.generic
   // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
-  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]} 
+  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
   // CHECK-SAME:   ins(%{{.*}} : tensor<128x8x32x8xf32>, tensor<8x8x32x16xf32>)
   // CHECK-SAME:  outs(%{{.*}} : tensor<8x128x8x16xf32>)
   %0 = linalg.generic #mkn_trait ins(%A, %B : !A_mk, !B_nk) outs(%C : !C_nm) {
@@ -111,12 +115,14 @@ func.func @matmul_mk_nk_nm_transposed(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_
   return %0 : !C_nm
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
-  transform.structured.pack_greedily %generic
-      matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
-    : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
+    transform.structured.pack_greedily %generic
+        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
+      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+    transform.yield
+  }
 }
 
 // -----
@@ -144,7 +150,7 @@ transform.sequence failures(propagate) {
 func.func @contraction_bmkm2_nkb_nbm(%A : !A_bmkm2, %B : !B_nkb, %C : !C_nbm) -> !C_nbm {
   //      CHECK: linalg.generic
   // CHECK-SAME: indexing_maps = [#[[$bmkm2_kkmm]], #[[$nkb_kknn]], #[[$nbm_mmnn]]]
-  // CHECK-SAME:   ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]} 
+  // CHECK-SAME:   ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
   // CHECK-SAME:   ins(%{{.*}} : tensor<42x128x8x33x32x8xf32>, tensor<8x8x42x32x16xf32>)
   // CHECK-SAME:  outs(%{{.*}} : tensor<8x42x128x8x16xf32>)
   %0 = linalg.generic #mkn_trait ins(%A, %B : !A_bmkm2, !B_nkb) outs(%C : !C_nbm) {
@@ -156,12 +162,14 @@ func.func @contraction_bmkm2_nkb_nbm(%A : !A_bmkm2, %B : !B_nkb, %C : !C_nbm) ->
   return %0 : !C_nbm
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
-  transform.structured.pack_greedily %generic
-      matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
-    : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
+    transform.structured.pack_greedily %generic
+        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
+      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+    transform.yield
+  }
 }
 
 // -----
@@ -190,13 +198,15 @@ func.func @conv_2d_nchw_fchw(%arg0: tensor<?x47x16x16xf32>, %arg2: tensor<?x16x1
   return %0 : tensor<?x16x14x14xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %conv = transform.structured.match ops{["linalg.conv_2d_nchw_fchw"]} in %module_op 
-    : (!transform.any_op) -> !transform.op<"linalg.conv_2d_nchw_fchw">
-  transform.structured.pack_greedily %conv
-      matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
-    : (!transform.op<"linalg.conv_2d_nchw_fchw">) -> !transform.op<"linalg.generic">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %conv = transform.structured.match ops{["linalg.conv_2d_nchw_fchw"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"linalg.conv_2d_nchw_fchw">
+    transform.structured.pack_greedily %conv
+        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
+      : (!transform.op<"linalg.conv_2d_nchw_fchw">) -> !transform.op<"linalg.generic">
+    transform.yield
+  }
 }
 
 
@@ -219,12 +229,14 @@ func.func @reduce_and_map(%arg0: tensor<10x100xf32>,
   return %res : tensor<10xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
-  transform.structured.pack_greedily %generic
-      matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
-    : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
+    transform.structured.pack_greedily %generic
+        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
+      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+    transform.yield
+  }
 }
 
 // -----
@@ -252,7 +264,7 @@ transform.sequence failures(propagate) {
 func.func @matmul_mk_nk_nm(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
   //      CHECK: linalg.generic
   // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
-  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]} 
+  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
   // CHECK-SAME:   ins(%{{.*}} : tensor<128x8x32x8xf32>, tensor<1x8x32x130xf32>)
   // CHECK-SAME:  outs(%{{.*}} : tensor<1x128x8x130xf32>)
   %0 = linalg.generic #mkn_trait ins(%A, %B : !A_mk, !B_nk) outs(%C : !C_nm) {
@@ -264,16 +276,18 @@ func.func @matmul_mk_nk_nm(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
   return %0 : !C_nm
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
-  transform.structured.pack_greedily %generic
-      // In this spec, the "k" dimension is not packed but rather padded to the
-      // next multiple of 10 (i.e. 130).
-      matmul_packed_sizes = [8, 0, 32] 
-      matmul_padded_sizes_next_multiple_of = [0, 10, 0] 
-      matmul_inner_dims_order = [1, 2, 0]
-    : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
+    transform.structured.pack_greedily %generic
+        // In this spec, the "k" dimension is not packed but rather padded to the
+        // next multiple of 10 (i.e. 130).
+        matmul_packed_sizes = [8, 0, 32]
+        matmul_padded_sizes_next_multiple_of = [0, 10, 0]
+        matmul_inner_dims_order = [1, 2, 0]
+      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+    transform.yield
+  }
 }
 
 
@@ -302,7 +316,7 @@ transform.sequence failures(propagate) {
 func.func @matmul_mk_nk_nm(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
   //      CHECK: linalg.generic
   // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
-  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel"]} 
+  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel"]}
   // CHECK-SAME:   ins(%{{.*}} : tensor<1023x8x32xf32>, tensor<1x8x32x130xf32>)
   // CHECK-SAME:  outs(%{{.*}} : tensor<1x1023x130xf32>)
   %0 = linalg.generic #mkn_trait ins(%A, %B : !A_mk, !B_nk) outs(%C : !C_nm) {
@@ -314,17 +328,19 @@ func.func @matmul_mk_nk_nm(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
   return %0 : !C_nm
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
-  transform.structured.pack_greedily %generic
-      // In this spec, the "n" dimension is neither packed not unpacked.
-      // We don't end up with an innermost matmul after packing but only with an
-      // innermost matvec.
-      matmul_packed_sizes = [0, 0, 32] 
-      matmul_padded_sizes_next_multiple_of = [0, 10, 0] 
-      matmul_inner_dims_order = [1, 2, 0]
-    : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
+    transform.structured.pack_greedily %generic
+        // In this spec, the "n" dimension is neither packed nor unpacked.
+        // We don't end up with an innermost matmul after packing but only with an
+        // innermost matvec.
+        matmul_packed_sizes = [0, 0, 32]
+        matmul_padded_sizes_next_multiple_of = [0, 10, 0]
+        matmul_inner_dims_order = [1, 2, 0]
+      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
+    transform.yield
+  }
 }
 
 // -----
@@ -340,13 +356,15 @@ func.func @matvec_fail(%A : !A, %x : !X, %y : !Y) -> !Y {
   return %0 : !Y
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matvec"]} in %module_op 
-    : (!transform.any_op) -> !transform.op<"linalg.matvec">
-  transform.structured.pack_greedily %matmul 
-      matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
-    : (!transform.op<"linalg.matvec">) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matvec"]} in %module_op
+      : (!transform.any_op) -> !transform.op<"linalg.matvec">
+    transform.structured.pack_greedily %matmul
+        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
+      : (!transform.op<"linalg.matvec">) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -360,25 +378,27 @@ func.func @no_padding_on_packs(%A: tensor<32x32xf32>, %B: tensor<32x32xf32>, %C:
 }
 
 // CHECK-LABEL: no_padding_on_packs
-// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [8, 4] 
+// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [8, 4]
 // CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<4x8x8x4xf32>
-// CHECK: tensor.pack %{{.+}} outer_dims_perm = [1, 0] 
+// CHECK: tensor.pack %{{.+}} outer_dims_perm = [1, 0]
 // CHECK-SAME:  inner_dims_pos = [0, 1] inner_tiles = [4, 16] into %{{.+}} : tensor<32x32xf32> -> tensor<2x8x4x16xf32>
-// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [8, 16] 
+// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [8, 16]
 // CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<4x2x8x16xf32>
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
-      : (!transform.any_op) -> !transform.op<"linalg.matmul">
-    %1 = transform.structured.pack_greedily %0
-        matmul_packed_sizes = [8, 16, 4] matmul_inner_dims_order = [0, 1, 2]
-      : (!transform.op<"linalg.matmul">) -> !transform.op<"linalg.generic">
-    %pack = transform.get_producer_of_operand %1[1]
-    : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.pack">)
-    %2, %pack_2, %empty_unpack_2 =
-    transform.structured.pack_transpose %pack with_compute_op(%1)
-    outer_perm = [1, 0] inner_perm = [1, 0]
-     : (!transform.op<"tensor.pack">, !transform.op<"linalg.generic">)
-    -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.op<"linalg.matmul">
+    %1 = transform.structured.pack_greedily %0
+        matmul_packed_sizes = [8, 16, 4] matmul_inner_dims_order = [0, 1, 2]
+      : (!transform.op<"linalg.matmul">) -> !transform.op<"linalg.generic">
+    %pack = transform.get_producer_of_operand %1[1]
+      : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.pack">)
+    %2, %pack_2, %empty_unpack_2 =
+      transform.structured.pack_transpose %pack with_compute_op(%1)
+        outer_perm = [1, 0] inner_perm = [1, 0]
+        : (!transform.op<"tensor.pack">, !transform.op<"linalg.generic">)
+        -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
index b118d6f02ab98f7..77ce4d0b211f0d7 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-linalg-transform-patterns=test-patterns -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -test-linalg-transform-patterns=test-patterns -split-input-file | FileCheck %s
 
 func.func @dot(%x: memref<?xf32, strided<[1], offset: ?>>,
           %y: memref<?xf32, strided<[1], offset: ?>>,
@@ -9,10 +9,12 @@ func.func @dot(%x: memref<?xf32, strided<[1], offset: ?>>,
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.dot"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loop = transform.structured.tile_using_for %0 [8000] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.dot"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loop = transform.structured.tile_using_for %0 [8000] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @dot
@@ -33,10 +35,12 @@ func.func @matvec(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [5, 6] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [5, 6] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @matvec
@@ -60,13 +64,15 @@ func.func @matmul(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:3 = transform.structured.tile_using_for %0 [2000, 3000, 4000] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-    %2, %loops_2:3 = transform.structured.tile_using_for %1 [200, 300, 400] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-    %3, %loops_3:3 = transform.structured.tile_using_for %2 [20, 30, 40] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-    %4, %loops_4:3 = transform.structured.tile_using_for %3 [2, 3, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.tile_using_for %0 [2000, 3000, 4000] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %2, %loops_2:3 = transform.structured.tile_using_for %1 [200, 300, 400] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %3, %loops_3:3 = transform.structured.tile_using_for %2 [20, 30, 40] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %4, %loops_4:3 = transform.structured.tile_using_for %3 [2, 3, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @matmul
@@ -133,10 +139,12 @@ func.func @permute_generic(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.interchange %0 iterator_interchange = [1, 2, 0] : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.interchange %0 iterator_interchange = [1, 2, 0] : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL:  func @permute_generic
@@ -159,10 +167,12 @@ func.func @matvec_perm(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [5, 6] {interchange = [1, 0]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [5, 6] {interchange = [1, 0]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @matvec_perm
@@ -186,12 +196,14 @@ func.func @matmul_perm(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:3 = transform.structured.tile_using_for %0 [2000, 3000, 4000] {interchange = [1, 2, 0]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-    %2, %loops_2:3 = transform.structured.tile_using_for %1 [200, 300, 400] {interchange = [1, 0, 2]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-    %3, %loops_3:3 = transform.structured.tile_using_for %2 [20, 30, 40] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.tile_using_for %0 [2000, 3000, 4000] {interchange = [1, 2, 0]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %2, %loops_2:3 = transform.structured.tile_using_for %1 [200, 300, 400] {interchange = [1, 0, 2]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %3, %loops_3:3 = transform.structured.tile_using_for %2 [20, 30, 40] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @matmul_perm
diff --git a/mlir/test/Dialect/Linalg/transform-promotion.mlir b/mlir/test/Dialect/Linalg/transform-promotion.mlir
index 2f98e394fe05198..362719b730334a4 100644
--- a/mlir/test/Dialect/Linalg/transform-promotion.mlir
+++ b/mlir/test/Dialect/Linalg/transform-promotion.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
 func.func @promote_subview_matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>,
                              %arg1: memref<?x?xf32, strided<[?, 1], offset: ?>>,
@@ -58,10 +58,12 @@ func.func @promote_subview_matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset:
 // CHECK-SAME:                 ins(%[[v0]], %[[v1]] : memref<?x?xf32>, memref<?x?xf32>)
 // CHECK-SAME:                outs(%[[v2]] : memref<?x?xf32>)
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.promote %0 { operands_to_promote = [0, 1, 2], use_full_tiles_by_default } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.promote %0 { operands_to_promote = [0, 1, 2], use_full_tiles_by_default } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -118,12 +120,11 @@ func.func @promote_first_subview_matmul(%arg0: memref<?x?xf32, strided<[?, 1], o
 // CHECK-SAME:           ins(%[[v0]], %[[s1]] : memref<?x?xf32>, memref<?x?xf32, strided<[?, ?], offset: ?>>)
 // CHECK-SAME:          outs(%[[s2]] : memref<?x?xf32, strided<[?, ?], offset: ?>>)
 
-transform.with_pdl_patterns {
-^bb0(%arg0: !transform.any_op):
-  sequence %arg0 : !transform.any_op failures(propagate) {
-    ^bb0(%arg1: !transform.any_op):
-      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-      %1 = transform.structured.promote %0 { operands_to_promote = [0], use_full_tiles_by_default } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.promote %0 { operands_to_promote = [0], use_full_tiles_by_default } : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 }
 
@@ -151,12 +152,11 @@ func.func @aligned_promote_fill(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?
 // CHECK:         memref.copy %[[s0]], %[[l0]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, strided{{.*}}>
 // CHECK:         linalg.fill ins(%[[cf]] : f32) outs(%[[v0]] : memref<?x?xf32>)
 
-transform.with_pdl_patterns {
-^bb0(%arg0: !transform.any_op):
-  sequence %arg0 : !transform.any_op failures(propagate) {
-    ^bb0(%arg1: !transform.any_op):
-      %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-      %1 = transform.structured.promote %0 { operands_to_promote = [1], use_full_tile_buffers = [false, true], alignment = 32} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.promote %0 { operands_to_promote = [1], use_full_tile_buffers = [false, true], alignment = 32} : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 }
 
@@ -185,11 +185,10 @@ func.func @aligned_promote_fill_complex(%arg0: memref<?x?xcomplex<f32>, strided<
 // CHECK:         memref.copy %[[s0]], %[[l0]] : memref<?x?xcomplex<f32>, strided{{.*}}> to memref<?x?xcomplex<f32>, strided{{.*}}>
 // CHECK:         linalg.fill ins(%[[cc]] : complex<f32>) outs(%[[v0]] : memref<?x?xcomplex<f32>>)
 
-transform.with_pdl_patterns {
-^bb0(%arg0: !transform.any_op):
-  sequence %arg0 : !transform.any_op failures(propagate) {
-    ^bb0(%arg1: !transform.any_op):
-      %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-      %1 = transform.structured.promote %0 { operands_to_promote = [1], use_full_tile_buffers = [false, true], alignment = 32} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.promote %0 { operands_to_promote = [1], use_full_tile_buffers = [false, true], alignment = 32} : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-tile-and-fuse.mlir b/mlir/test/Dialect/Linalg/transform-tile-and-fuse.mlir
index aaf3fceace2d0e1..49596f63b648a19 100644
--- a/mlir/test/Dialect/Linalg/transform-tile-and-fuse.mlir
+++ b/mlir/test/Dialect/Linalg/transform-tile-and-fuse.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file -canonicalize | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file -canonicalize | FileCheck %s
 
 // This is a simple tile-and-fuse example with a single fusion group.
 
@@ -40,19 +40,21 @@ module {
     return %7 : tensor<?x?xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    // Find the root and all producers.
-    %root = transform.structured.match attributes{"__root__"} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %producers = transform.structured.match attributes{"__producer__"} in %arg1 : (!transform.any_op) -> !transform.any_op
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      // Find the root and all producers.
+      %root = transform.structured.match attributes{"__root__"} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %producers = transform.structured.match attributes{"__producer__"} in %arg1 : (!transform.any_op) -> !transform.any_op
 
-    // Tile the root.
-    %tiled_op, %forall_op = transform.structured.tile_using_forall %root num_threads [10, 20]
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+      // Tile the root.
+      %tiled_op, %forall_op = transform.structured.tile_using_forall %root num_threads [10, 20]
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-    // Fuse all producers.
-    transform.structured.fuse_into_containing_op %producers into %forall_op
-      : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+      // Fuse all producers.
+      transform.structured.fuse_into_containing_op %producers into %forall_op
+        : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -99,19 +101,21 @@ module {
     return %7 : tensor<?x?xf32>
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%arg1: !transform.any_op):
-    // Find the root and all producers.
-    %root = transform.structured.match attributes{"__root__"} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %producers = transform.structured.match attributes{"__producer__"} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %reversed_producers = transform.test_reverse_payload_ops %producers : (!transform.any_op) -> !transform.any_op
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+      // Find the root and all producers.
+      %root = transform.structured.match attributes{"__root__"} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %producers = transform.structured.match attributes{"__producer__"} in %arg1 : (!transform.any_op) -> !transform.any_op
+      %reversed_producers = transform.test_reverse_payload_ops %producers : (!transform.any_op) -> !transform.any_op
 
-    // Tile the root.
-    %tiled_op, %forall_op = transform.structured.tile_using_forall %root num_threads [10, 20]
-         : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+      // Tile the root.
+      %tiled_op, %forall_op = transform.structured.tile_using_forall %root num_threads [10, 20]
+           : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-    // Fuse all producers.
-    transform.structured.fuse_into_containing_op %reversed_producers into %forall_op
-      : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+      // Fuse all producers.
+      transform.structured.fuse_into_containing_op %reversed_producers into %forall_op
+        : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
diff --git a/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir b/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir
index 5098d1b4b485b5b..0e1512717a22def 100644
--- a/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir
+++ b/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file -canonicalize -cse -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -canonicalize -cse -verify-diagnostics | FileCheck %s
 
 func.func @reduction_tile(%arg0: tensor<?x?xf32>, %out: tensor<?xf32>) -> tensor<?xf32> {
   %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
@@ -14,11 +14,13 @@ func.func @reduction_tile(%arg0: tensor<?x?xf32>, %out: tensor<?xf32>) -> tensor
   return %red : tensor<?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %2, %3, %loop = transform.structured.tile_reduction_using_for %0
-    by tile_sizes = [0, 5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %2, %3, %loop = transform.structured.tile_reduction_using_for %0
+      by tile_sizes = [0, 5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
@@ -67,11 +69,13 @@ func.func @reduction_tile_transpose(%arg0: tensor<?x?xf32>, %out: tensor<?xf32>)
   return %red : tensor<?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %2, %3, %loop = transform.structured.tile_reduction_using_for %0
-    by tile_sizes = [5, 0] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %2, %3, %loop = transform.structured.tile_reduction_using_for %0
+      by tile_sizes = [5, 0] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 5)>
@@ -106,11 +110,13 @@ func.func @reduction_tile_parallel(
   return %red : tensor<?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
-    by num_threads = [0, 5], tile_sizes = [] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
+      by num_threads = [0, 5], tile_sizes = [] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0)[s0] -> (-(d0 * (s0 ceildiv 5)) + s0, s0 ceildiv 5)>
@@ -158,11 +164,13 @@ func.func @matmul_tile_parallel(
   return %matmul : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
-    by num_threads = [0, 0, 5], tile_sizes = [] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
+      by num_threads = [0, 0, 5], tile_sizes = [] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0)[s0] -> (-(d0 * (s0 ceildiv 5)) + s0, s0 ceildiv 5)>
@@ -217,11 +225,13 @@ func.func @reduction_tile_parallel_cyclic_dist(
   return %red : tensor<?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
-    by num_threads = [0, 5], tile_sizes = [0, 3], mapping = [#gpu.thread<x>] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
+      by num_threads = [0, 5], tile_sizes = [0, 3], mapping = [#gpu.thread<x>] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // CHECK-DAG: #[[MAP0:.*]] = affine_map<()[s0] -> (s0 * 3)>
@@ -282,23 +292,25 @@ func.func @reduction_tile_parallel_cyclic_dist(
   return %red : tensor<?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
-    by num_threads = [0, 5], tile_sizes = [0, 3], mapping = [#gpu.thread<x>] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-
-  //      CHECK:     expecting fill
-  // CHECK-NEXT:     linalg.fill
-  transform.print %1 {name = "expecting fill"} : !transform.any_op
-  //      CHECK:     expecting parallel reduction
-  // CHECK-NEXT:     linalg.generic
-  //      CHECK:     iterator_types = ["parallel", "reduction"]
-  transform.print %2 {name = "expecting parallel reduction"} : !transform.any_op
-  //      CHECK:     expecting parallel reduction
-  // CHECK-NEXT:     linalg.generic
-  //      CHECK:     iterator_types = ["parallel", "reduction"]
-  transform.print %3 {name = "expecting parallel reduction"} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
+      by num_threads = [0, 5], tile_sizes = [0, 3], mapping = [#gpu.thread<x>] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+
+    //      CHECK:     expecting fill
+    // CHECK-NEXT:     linalg.fill
+    transform.print %1 {name = "expecting fill"} : !transform.any_op
+    //      CHECK:     expecting parallel reduction
+    // CHECK-NEXT:     linalg.generic
+    //      CHECK:     iterator_types = ["parallel", "reduction"]
+    transform.print %2 {name = "expecting parallel reduction"} : !transform.any_op
+    //      CHECK:     expecting parallel reduction
+    // CHECK-NEXT:     linalg.generic
+    //      CHECK:     iterator_types = ["parallel", "reduction"]
+    transform.print %3 {name = "expecting parallel reduction"} : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -319,13 +331,15 @@ func.func @reduction_untiled_forall(
   return %red : tensor<?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{could not tile reduction}}
-  %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
-    by num_threads = [5], tile_sizes = [3], mapping = [#gpu.thread<x>] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{could not tile reduction}}
+    %1, %2, %3, %loop = transform.structured.tile_reduction_using_forall %0
+      by num_threads = [5], tile_sizes = [3], mapping = [#gpu.thread<x>] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
 
+    transform.yield
+  }
 }
 
 // -----
@@ -346,11 +360,13 @@ module {
     } -> tensor<?xf32>
     return %0 : tensor<?xf32>
   }
-  transform.sequence failures(propagate) {
-  ^bb0(%arg0: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-    // expected-error @below {{transform.structured.tile_reduction_using_for failed to apply}}
-    %fill_op, %split_linalg_op, %combining_linalg_op, %for_op = transform.structured.tile_reduction_using_for %0 by tile_sizes = [0, 5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+      // expected-error @below {{transform.structured.tile_reduction_using_for failed to apply}}
+      %fill_op, %split_linalg_op, %combining_linalg_op, %for_op = transform.structured.tile_reduction_using_for %0 by tile_sizes = [0, 5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -369,10 +385,12 @@ module {
     } -> tensor<4096xf32>
     return %0 : tensor<4096xf32>
   }
-  transform.sequence  failures(propagate) {
-  ^bb0(%arg0: !transform.any_op):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-    %fill_op, %split_linalg_op, %combining_linalg_op, %for_op = transform.structured.tile_reduction_using_for %0 by tile_sizes = [0, 2, 64] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+      %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+      %fill_op, %split_linalg_op, %combining_linalg_op, %for_op = transform.structured.tile_reduction_using_for %0 by tile_sizes = [0, 2, 64] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+      transform.yield
+    }
   }
 }
 
diff --git a/mlir/test/Dialect/Linalg/vectorization-scalable.mlir b/mlir/test/Dialect/Linalg/vectorization-scalable.mlir
index 641b626f576e1ea..d6f8d78358370ab 100644
--- a/mlir/test/Dialect/Linalg/vectorization-scalable.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization-scalable.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
 func.func @vectorize_dynamic_identity(%arg0: tensor<?xf32>,
                                       %arg1: tensor<?xf32>,
@@ -26,10 +26,12 @@ func.func @vectorize_dynamic_identity(%arg0: tensor<?xf32>,
 // CHECK:           %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<[4]xf32>
 // CHECK:           %[[VAL_14:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %{{.*}} {in_bounds = [true]} : vector<[4]xf32>, tensor<?xf32> } : vector<[4]xi1> -> tensor<?xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [[4]] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [[4]] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -68,10 +70,12 @@ func.func @vectorize_partial_dynamic_identity(%arg0: tensor<8x?xf32>,
 // CHECK:           %[[VAL_16:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write %[[VAL_14]], %[[VAL_2]][%[[VAL_15]], %[[VAL_15]]] {in_bounds = [true, true]} : vector<8x[32]xf32>, tensor<8x?xf32> } : vector<8x[32]xi1> -> tensor<8x?xf32>
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [8, [32]] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [8, [32]] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -108,10 +112,12 @@ func.func @vectorize_static_shape_with_mask(%arg0: tensor<8x30xf32>,
 // CHECK:           %[[VAL_14:.*]] = arith.constant 0 : index
 // CHECK:           %[[VAL_15:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %[[VAL_13]], %[[VAL_2]][%[[VAL_14]], %[[VAL_14]]] {in_bounds = [true, true]} : vector<8x[32]xf32>, tensor<8x30xf32> } : vector<8x[32]xi1> -> tensor<8x30xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [8, [32]] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [8, [32]] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -128,9 +134,11 @@ func.func @vectorize_dynamic_fill(%A : tensor<?x?xf32>, %arg0 : f32) -> tensor<?
 //   CHECK: %[[BCAST:.*]] = vector.broadcast %{{.*}} : f32 to vector<8x[16]xf32>
 //   CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[BCAST]], {{.*}} {in_bounds = [true, true]} : vector<8x[16]xf32>, tensor<?x?xf32> } : vector<8x[16]xi1>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [8, [16]] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [8, [16]] : !transform.any_op
+    transform.yield
+  }
 }
 
diff --git a/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir b/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir
index ecba1f32468031e..a43cc2df88cadfe 100644
--- a/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
 // CHECK-LABEL: contraction_dot
 func.func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memref<f32>) {
@@ -10,10 +10,12 @@ func.func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memre
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.dot"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0  : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.dot"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0  : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -28,11 +30,13 @@ func.func @contraction_matvec(%A: memref<1584x1584xf32>, %B: memref<1584xf32>, %
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -46,11 +50,13 @@ func.func @contraction_matmul(%A: memref<1584x1584xf32>, %B: memref<1584x1584xf3
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -65,11 +71,13 @@ func.func @contraction_batch_matmul(%A: memref<1584x1584x1584xf32>, %B: memref<1
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.batch_matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.batch_matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -105,11 +113,13 @@ func.func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -145,11 +155,13 @@ func.func @generic_output_transpose(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -172,11 +184,13 @@ func.func @generic_interchanged_transpose(%arg0: tensor<12x128x32xf32>) -> tenso
   return %1 : tensor<128x12x32xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -212,11 +226,13 @@ func.func @vectorization_test_integer(%A: memref<8x16xi32>, %B: memref<16x32xi32
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -232,11 +248,13 @@ func.func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -256,11 +274,13 @@ func.func @test_vectorize_scalar_input(%A : memref<8x16xf32>, %arg0 : f32) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -280,11 +300,13 @@ func.func @test_do_not_vectorize_unsupported_element_types(%A : memref<8x16xcomp
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -325,11 +347,13 @@ func.func @vectorize_affine_apply(%arg0: tensor<5xf32>, %arg3: index) -> tensor<
 // CHECK:   %[[CAST:.*]] = arith.index_cast %[[ADDI_4]] : vector<5xindex> to vector<5xi32>
 // CHECK:   vector.transfer_write %[[CAST]], %[[EMPTY]][%[[C0:.*]]] {in_bounds = [true]} : vector<5xi32>, tensor<5xi32>
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -342,11 +366,13 @@ func.func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -360,11 +386,13 @@ func.func @test_vectorize_fill_scalar(%A : memref<f32>, %arg0 : f32) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -377,11 +405,13 @@ func.func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -397,11 +427,13 @@ func.func @test_vectorize_copy_scalar(%A : memref<f32>, %B : memref<f32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -413,11 +445,13 @@ func.func @test_vectorize_copy_complex(%A : memref<8x16xcomplex<f32>>, %B : memr
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -441,11 +475,13 @@ func.func @test_vectorize_trailing_index(%arg0: memref<1x2x4x8xindex>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -470,11 +506,13 @@ func.func @test_vectorize_inner_index(%arg0: memref<1x2x4x8xindex>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -555,11 +593,13 @@ func.func @generic_vectorize(%arg0: memref<4x256xf32>,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -646,11 +686,13 @@ func.func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
     tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -690,11 +732,13 @@ func.func @generic_vectorize_broadcast_transpose(
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -733,11 +777,13 @@ func.func @vectorization_transpose(%A: memref<14x7xf32>, %B: memref<16x14xf32>,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -765,11 +811,13 @@ func.func @matmul_tensors(
   return %0 : tensor<8x12xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -794,11 +842,13 @@ func.func @pad_static(%arg0: tensor<2x?x2xf32>, %pad_value: f32) -> tensor<2x3x4
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -823,11 +873,13 @@ func.func @pad_static_source(%arg0: tensor<2x5x2xf32>, %pad_value: f32) -> tenso
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 
@@ -860,11 +912,13 @@ func.func @pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: in
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -880,11 +934,13 @@ func.func @pad_static_complex(%arg0: tensor<2x5x2xcomplex<f32>>, %pad_value: com
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -910,11 +966,13 @@ func.func @pad_and_transfer_read(%arg0: tensor<5x6xf32>) -> vector<7x9xf32> {
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -943,11 +1001,13 @@ func.func @pad_and_transfer_write_static(
   return %3 : tensor<5x6xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 
@@ -980,11 +1040,13 @@ func.func @pad_and_transfer_write_dynamic_static(
   return %3 : tensor<?x6xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 
@@ -1014,11 +1076,13 @@ func.func @pad_and_insert_slice_source(
   return %r : tensor<12x13xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 
@@ -1042,11 +1106,13 @@ func.func @pad_and_insert_slice_dest(
   return %r : tensor<1x12x13xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1079,11 +1145,13 @@ func.func @pad_tensor_non_const_pad_value(%arg0: tensor<5x6xf32>) -> tensor<12x1
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1114,11 +1182,13 @@ func.func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1159,11 +1229,13 @@ func.func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1189,11 +1261,13 @@ func.func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1220,11 +1294,13 @@ func.func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1250,11 +1326,13 @@ func.func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1280,11 +1358,13 @@ func.func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1310,11 +1390,13 @@ func.func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1340,11 +1422,13 @@ func.func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1374,11 +1458,13 @@ func.func @explicit_broadcast(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) ->
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1412,15 +1498,17 @@ func.func @fused_broadcast_red_2d(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
 
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1459,11 +1547,13 @@ func.func @reduce_1d(%arg0: tensor<32xf32>) -> tensor<f32> {
   return %2 : tensor<f32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 
@@ -1490,11 +1580,13 @@ func.func @not_projected_permutation(%arg0: tensor<8x8xf32>) -> tensor<6x6x3x3xf
   return %result : tensor<6x6x3x3xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1529,11 +1621,13 @@ func.func @mixed_parallel_reduced_results(%arg0 : tensor<2x4x8xf32>,
 //   CHECK-DAG:   vector.transfer_write %[[MUL]], %[[ARG2]]
 //   CHECK-DAG:   vector.transfer_write %[[ADD]], %[[ARG3]]
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1553,11 +1647,13 @@ func.func @vectorize_map(%arg0: memref<64xf32>,
 // CHECK-NEXT:    %[[RHS:.*]] = vector.transfer_read
 // CHECK-NEXT:    arith.addf %[[LHS]], %[[RHS]] : vector<64xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.map"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.map"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1572,11 +1668,13 @@ func.func @vectorize_transpose(%arg0: memref<16x32x64xf32>,
 // CHECK:         vector.transpose
 // CHECK-SAME:      [1, 2, 0] : vector<16x32x64xf32> to vector<32x64x16xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.transpose"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.transpose"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1595,11 +1693,13 @@ func.func @vectorize_reduce(%arg0: memref<16x32x64xf32>,
 // CHECK:         vector.multi_reduction <add>
 // CHECK-SAME:    : vector<16x32x64xf32> to vector<16x64xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.reduce"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.reduce"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1628,10 +1728,12 @@ func.func @not_vectorizable(%arg0: tensor<1x?xf32>, %arg1: index, %arg2: index,
   }
   return %1 : tensor<1x128xf32>
 }
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.vectorize_children_and_apply_patterns %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.structured.vectorize_children_and_apply_patterns %0 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1662,11 +1764,13 @@ func.func @wrong_reduction_detection(%input: tensor<120x64xf32>) -> tensor<120x6
   return %1 : tensor<120x64xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: @wrong_reduction_detection
@@ -1691,11 +1795,13 @@ func.func @tensor_size0(%arg0: tensor<0xf32>,
   return %0 : tensor<f32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1712,11 +1818,13 @@ func.func @test_masked_pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: inde
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -1734,11 +1842,13 @@ func.func @zero_dim_tensor(%input: tensor<f32>, %output: tensor<f32>) -> tensor<
   return %0 : tensor<f32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @zero_dim_tensor
@@ -1771,11 +1881,13 @@ func.func @multi_output_generic_different_perm_maps(%in0: tensor<4x1xf32>,
   return %13#0, %13#1 : tensor<4x1xf32>, tensor<1x4xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @multi_output_generic_different_perm_maps
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index ddeaff76a04df23..7f4af344886f498 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
 func.func @vectorize_dynamic_identity(%arg0: tensor<?xf32>,
                                       %arg1: tensor<?xf32>,
@@ -26,10 +26,12 @@ func.func @vectorize_dynamic_identity(%arg0: tensor<?xf32>,
 // CHECK:           %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<4xf32>
 // CHECK:           %[[VAL_14:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %{{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [4] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [4] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -60,10 +62,12 @@ func.func @vectorize_dynamic_1d_broadcast(%arg0: tensor<?xf32>,
 // CHECK:           %[[VAL_13:.*]] = arith.addf %[[VAL_7]], %[[VAL_10]] : vector<4xf32>
 // CHECK:           %[[VAL_14:.*]] = vector.mask %{{.*}} { vector.transfer_write %[[VAL_13]], {{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [4] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [4] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -98,10 +102,12 @@ func.func @vectorize_dynamic_2d_transpose(%arg0: tensor<?x?xf32>,
 // CHECK:           %[[VAL_16:.*]] = arith.addf %[[VAL_10]], %[[VAL_13]] : vector<4x8xf32>
 // CHECK:           %[[VAL_17:.*]] = vector.mask %[[VAL_12]] { vector.transfer_write %[[VAL_16]], %{{.*}} {in_bounds = [true, true]} : vector<4x8xf32>, tensor<?x?xf32> } : vector<4x8xi1> -> tensor<?x?xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -135,10 +141,12 @@ func.func @vectorize_dynamic_generic_2d_broadcast(%arg0: tensor<?x?xf32>,
 // CHECK:           %[[VAL_16:.*]] = arith.addf %[[VAL_10]], %[[VAL_13]] : vector<4x8xf32>
 // CHECK:           %[[VAL_18:.*]] = vector.mask %[[VAL_12]] { vector.transfer_write %{{.*}} {in_bounds = [true, true]} : vector<4x8xf32>, tensor<?x?xf32> } : vector<4x8xi1> -> tensor<?x?xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -157,10 +165,12 @@ func.func @vectorize_dynamic_reduction(%arg0: tensor<?x?xf32>,
   return %0 : tensor<?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL:   @vectorize_dynamic_reduction(
@@ -195,10 +205,12 @@ func.func @vectorize_dynamic_transpose_reduction(%arg0: tensor<?x?x?xf32>,
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [4, 8, 16] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [4, 8, 16] : !transform.any_op
+    transform.yield
+  }
 }
 
 // CHECK-LABEL:   @vectorize_dynamic_transpose_reduction(
@@ -253,10 +265,12 @@ func.func @vectorize_partial_dynamic_identity(%arg0: tensor<8x?xf32>,
 // CHECK:           %[[VAL_16:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write %[[VAL_14]], %[[VAL_2]][%[[VAL_15]], %[[VAL_15]]] {in_bounds = [true, true]} : vector<8x32xf32>, tensor<8x?xf32> } : vector<8x32xi1> -> tensor<8x?xf32>
 
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [8, 32] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [8, 32] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -280,10 +294,12 @@ func.func @do_not_generate_masks(%arg0: tensor<8x32xf32>,
 // CHECK-LABEL: func.func @do_not_generate_masks
 // CHECK-NOT: vector.mask
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [8, 32] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [8, 32] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -320,10 +336,12 @@ func.func @vectorize_static_shape_with_mask(%arg0: tensor<8x30xf32>,
 // CHECK:           %[[VAL_14:.*]] = arith.constant 0 : index
 // CHECK:           %[[VAL_15:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %[[VAL_13]], %[[VAL_2]][%[[VAL_14]], %[[VAL_14]]] {in_bounds = [true, true]} : vector<8x32xf32>, tensor<8x30xf32> } : vector<8x32xi1> -> tensor<8x30xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [8, 32] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [8, 32] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -340,10 +358,12 @@ func.func @vectorize_dynamic_fill(%A : tensor<?x?xf32>, %arg0 : f32) -> tensor<?
 //   CHECK: %[[BCAST:.*]] = vector.broadcast %{{.*}} : f32 to vector<8x16xf32>
 //   CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[BCAST]], {{.*}} {in_bounds = [true, true]} : vector<8x16xf32>, tensor<?x?xf32> } : vector<8x16xi1>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [8, 16] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [8, 16] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -361,10 +381,12 @@ func.func @test_masked_vectorize_linalg_copy(%A : memref<?x?xf32>, %B : memref<?
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -396,11 +418,13 @@ func.func @test_masked_vectorize_pad(
   return %1: tensor<2x4xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -438,11 +462,13 @@ func.func @test_masked_vectorize_dynamic_pad(
   return %1: tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -473,10 +499,12 @@ func.func @matmul(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>)
 // CHECK:           %[[C2:.*]] = arith.constant 0 : index
 // CHECK:           vector.mask %[[MASK_C]] { vector.transfer_write %[[MULTI_RED]], %[[C]]{{\[}}%[[C2]], %[[C2]]] {in_bounds = [true, true]} : vector<8x16xf32>, memref<?x?xf32> } : vector<8x16xi1>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %matmul vector_sizes [8, 16, 4] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %matmul vector_sizes [8, 16, 4] : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -507,8 +535,10 @@ func.func @matmul_scalable(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<
 // CHECK:           %[[C2:.*]] = arith.constant 0 : index
 // CHECK:           vector.mask %[[MASK_C]] { vector.transfer_write %[[MULTI_RED]], %[[C]]{{\[}}%[[C2]], %[[C2]]] {in_bounds = [true, true]} : vector<8x[16]xf32>, memref<?x?xf32> } : vector<8x[16]xi1>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %matmul vector_sizes [8, [16], 4] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %matmul vector_sizes [8, [16], 4] : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/vectorize-tensor-extract-masked.mlir b/mlir/test/Dialect/Linalg/vectorize-tensor-extract-masked.mlir
index 3187385b5398816..edc38b42f5cdb21 100644
--- a/mlir/test/Dialect/Linalg/vectorize-tensor-extract-masked.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-tensor-extract-masked.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
 func.func @masked_static_vectorize_nd_tensor_extract_with_affine_apply_contiguous(%6: tensor<80x16xf32>, %arg0: index, %extracted_slice : tensor<1x3xf32>) -> tensor<1x3xf32> {
   %c79 = arith.constant 79 : index
@@ -25,11 +25,13 @@ func.func @masked_static_vectorize_nd_tensor_extract_with_affine_apply_contiguou
 // CHECK:           %[[VAL_20:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<80x16xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
 // CHECK:           %[[VAL_22:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x3xf32> } : vector<1x4xi1> -> tensor<1x3xf32>
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
+    transform.yield
+  }
+}
 
  // -----
 
@@ -80,10 +82,12 @@ func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_contiguo
 // CHECK:           return %[[VAL_27]] : tensor<?x?xf32>
 // CHECK:         }
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -118,11 +122,13 @@ func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tenso
 // CHECK:           %[[VAL_23:.*]] = vector.mask %[[VAL_8]] { vector.gather {{.*}} : tensor<80x16xf32>, vector<1x4xindex>, vector<1x4xi1>, vector<1x4xf32> into vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
 // CHECK:           %[[VAL_25:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x3xf32> } : vector<1x4xi1> -> tensor<1x3xf32>
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
+    transform.yield
+  }
+}
 
  // -----
 
@@ -173,11 +179,13 @@ func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_gather(%
 // CHECK:           return %[[VAL_27]] : tensor<?x?xf32>
 // CHECK:         }
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -223,11 +231,13 @@ func.func @extract_masked_vectorize(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf3
 // CHECK:           %[[VAL_23:.*]] = arith.constant 0 : index
 // CHECK:           %[[VAL_24:.*]] = vector.mask %[[VAL_10]] { vector.transfer_write %[[VAL_22]], %[[VAL_1]]{{\[}}%[[VAL_23]], %[[VAL_23]]] {in_bounds = [true, true]} : vector<3x3xf32>, tensor<?x?xf32> } : vector<3x3xi1> -> tensor<?x?xf32>
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   transform.structured.vectorize %0 vector_sizes [3, 3] vectorize_nd_extract : !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [3, 3] vectorize_nd_extract : !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -266,8 +276,10 @@ func.func @tensor_extract_dynamic_shape(%arg1: tensor<123x321xf32>, %arg2: tenso
 // CHECK:           %[[C0_1:.*]] = arith.constant 0 : index
 // CHECK:           vector.mask %[[MASK]] { vector.gather %[[ARG_1]][%[[C0_1]], %[[C0_1]]] [%{{.*}}], %[[MASK_2]], %[[FALLTHROUGH]] : tensor<123x321xf32>, vector<1x3x8xindex>, vector<1x3x8xi1>, vector<1x3x8xf32> into vector<1x3x8xf32> } : vector<1x3x8xi1> -> vector<1x3x8xf32>
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   transform.structured.vectorize %0 vector_sizes [1, 3, 8] vectorize_nd_extract : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [1, 3, 8] vectorize_nd_extract : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
index 5cf9c81dff69553..3fd4fcd536624c3 100644
--- a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d2)>
 #map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
@@ -27,11 +27,13 @@ func.func @vectorize_1d_tensor_extract(%arg0: tensor<3xf32>, %arg1: tensor<4x3xi
 // CHECK: %[[GATHER:.*]] = vector.gather %[[ARG0]][%[[C0]]] [%[[INDICES]]], %[[MASK]], %[[PASSTHRU]]
 // CHECK: vector.transfer_write %[[GATHER]]
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -62,11 +64,13 @@ func.func @vectorize_nd_tensor_extract_constant_idx(%arg0: tensor<3x3xf32>, %arg
 // CHECK:           %[[C0_4:.*]] = arith.constant 0 : index
 // CHECK:           vector.transfer_write %[[READ]], %[[ARG_1]][%[[C0_4]], %[[C0_4]], %[[C0_4]]]  : vector<1x1x3xf32>, tensor<1x1x3xf32>
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 { vectorize_nd_extract }  : !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 { vectorize_nd_extract } : !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -100,11 +104,13 @@ func.func @vectorize_nd_tensor_extract_transfer_read_basic(%arg0: tensor<3x3x3xf
 // CHECK:   %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[IDX1]], %[[IDX2]], %[[C0:.*]]], %[[CST_0]] {in_bounds = [true, true, true]} : tensor<3x3x3xf32>, vector<1x1x3xf32>
 // CHECK:   vector.transfer_write %[[READ]], %[[ARG1]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<1x1x3xf32>, tensor<1x1x3xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
  // -----
@@ -152,12 +158,14 @@ func.func @vectorize_nd_tensor_extract_transfer_read_complex(%6: tensor<45x80x16
 // CHECK:           return %[[VAL_21]] : tensor<1x4xf32>
 // CHECK:         }
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -200,11 +208,13 @@ func.func @vectorize_nd_tensor_extract_index_from_tensor(%arg0: tensor<3x3xf32>,
 // CHECK:    %[[GATHER:.*]] = vector.gather %[[ARG0]][%[[C0]], %[[C0]]] [%[[T]]], %[[CST_1]], %[[PASSTHRU]] : tensor<3x3xf32>, vector<4x7x3x2xindex>, vector<4x7x3x2xi1>, vector<4x7x3x2xf32> into vector<4x7x3x2xf32>
 // CHECK:    vector.transfer_write %[[GATHER]], %[[ARG4]][%[[C0]], %[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true, true]} : vector<4x7x3x2xf32>, tensor<4x7x3x2xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 // -----
 
@@ -244,12 +254,14 @@ func.func @vectorize_nd_tensor_extract_contiguous_and_gather(%arg0: tensor<6xf32
 // CHECK:           %[[VAL_14:.*]] = vector.transfer_write %[[VAL_13]], %[[VAL_8]]{{\[}}%[[VAL_2]]] {in_bounds = [true]} : vector<5xf32>, tensor<5xf32>
 // CHECK:           return %[[VAL_14]] : tensor<5xf32>
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -286,12 +298,14 @@ func.func @vectorize_nd_tensor_extract_with_affine_apply_contiguous(%6: tensor<8
 // CHECK:           return %[[VAL_12]] : tensor<1x4xf32>
 // CHECK:         }
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -328,12 +342,14 @@ func.func @vectorize_nd_tensor_extract_with_tensor_extract(%input_1: tensor<1x20
 // CHECK:           vector.transfer_read %[[INPUT_2]][%{{.*}}, %{{.*}}, %{{.*}} {in_bounds = [true, true]} : tensor<257x24xf32>, vector<1x4xf32>
 
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -372,12 +388,14 @@ func.func @vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tensor<80x16
 // CHECK:           return %[[VAL_14]] : tensor<1x4xf32>
 // CHECK:         }
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -412,12 +430,14 @@ func.func @vectorize_nd_tensor_extract_with_maxsi_gather(%arg0: tensor<80x16xf32
 // CHECK:           return %[[VAL_10]] : tensor<1x4xf32>
 // CHECK:         }
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -452,12 +472,14 @@ func.func @vectorize_nd_tensor_extract_with_maxsi_contiguous(%arg0: tensor<80x16
 // CHECK:           return %[[VAL_9]] : tensor<1x4xf32>
 // CHECK:         }
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -491,12 +513,14 @@ func.func @vectorize_nd_tensor_extract_block_arg(%arg0: tensor<5x6xf32>, %arg1:
 // CHECK:           return %[[VAL_12]] : tensor<5xf32>
 // CHECK:         }
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
 
 // -----
 
@@ -518,9 +542,11 @@ func.func @vectorize_0d_tensor_extract(%arg0: tensor<f32>, %arg2: tensor<1x1x3xf
 // CHECK:           %[[EXTRACT:.*]] = tensor.extract %[[ARG_0]][] : tensor<f32>
 // CHECK:           vector.broadcast %[[EXTRACT]] : f32 to vector<1x1x3xf32>
 
-transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
-   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
- }
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
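
Unless an entry point is specified, the transform-interpreter pass looks up a named sequence called @__transform_main, which is why every schedule rewritten above takes the same shape. A minimal sketch of that shape (the %root name and the comment are illustrative):

    module attributes {transform.with_named_sequence} {
      transform.named_sequence @__transform_main(
          %root: !transform.any_op {transform.readonly}) {
        // Transform ops applied to the payload anchored at %root go here.
        transform.yield
      }
    }
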
diff --git a/mlir/test/Dialect/MemRef/alloc-to-alloca.mlir b/mlir/test/Dialect/MemRef/alloc-to-alloca.mlir
index 2e788236ca91152..2db374ad092a989 100644
--- a/mlir/test/Dialect/MemRef/alloc-to-alloca.mlir
+++ b/mlir/test/Dialect/MemRef/alloc-to-alloca.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt --test-transform-dialect-interpreter=debug-transform-root-tag=all %s | FileCheck %s --check-prefixes=CHECK,ALL
-// RUN: mlir-opt --test-transform-dialect-interpreter=debug-transform-root-tag=small %s | FileCheck %s --check-prefixes=CHECK,SMALL
+// RUN: mlir-opt --transform-interpreter=entry-point=all %s | FileCheck %s --check-prefixes=CHECK,ALL
+// RUN: mlir-opt --transform-interpreter=entry-point=small %s | FileCheck %s --check-prefixes=CHECK,SMALL
 
 func.func private @callee(memref<*xf32>)
 
@@ -49,20 +49,22 @@ func.func @mismatching_scope() {
   return
 }
 
-transform.sequence failures(propagate) attributes {transform.target_tag = "all"} {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.alloc_to_alloca
-  } : !transform.any_op
-  transform.yield
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @all(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.alloc_to_alloca
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
-transform.sequence failures(propagate) attributes {transform.target_tag = "small"} {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.alloc_to_alloca size_limit(32)
-  } : !transform.any_op
-  transform.yield
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @small(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.alloc_to_alloca size_limit(32)
+    } : !transform.any_op
+    transform.yield
+  }
 }
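
The entry-point option above replaces the old debug-transform-root-tag mechanism: each RUN line executes only the named sequence whose symbol name matches, so a single test file can carry several independent schedules. A minimal sketch of the pattern (hypothetical sequence names @first and @second):

    // RUN: mlir-opt --transform-interpreter=entry-point=first %s
    // RUN: mlir-opt --transform-interpreter=entry-point=second %s
    module attributes {transform.with_named_sequence} {
      transform.named_sequence @first(%arg0: !transform.any_op {transform.readonly}) {
        transform.yield
      }
      transform.named_sequence @second(%arg0: !transform.any_op {transform.readonly}) {
        transform.yield
      }
    }
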
diff --git a/mlir/test/Dialect/MemRef/extract-address-computations.mlir b/mlir/test/Dialect/MemRef/extract-address-computations.mlir
index 40393ff24e4a520..80f7e9c434c85dc 100644
--- a/mlir/test/Dialect/MemRef/extract-address-computations.mlir
+++ b/mlir/test/Dialect/MemRef/extract-address-computations.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -test-transform-dialect-interpreter %s --split-input-file --verify-diagnostics | FileCheck %s
+// RUN: mlir-opt -transform-interpreter %s --split-input-file --verify-diagnostics | FileCheck %s
 
 // Simple test: check that we extract the address computation of a load into
 // a dedicated subview.
@@ -21,14 +21,16 @@ func.func @test_load(%base : memref<2x16x16xf32>, %offset : index) -> f32 {
   return %loaded_val : f32
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
-  // Verify that the returned handle is usable.
-  transform.test_print_remark_at_operand %0, "transformed" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    // Verify that the returned handle is usable.
+    transform.test_print_remark_at_operand %0, "transformed" : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -49,12 +51,14 @@ func.func @test_load_nontemporal(%base : memref<2x16x16xf32>, %offset : index) -
   return %loaded_val : f32
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -80,12 +84,14 @@ func.func @test_store(%base : memref<2x16x16xf32>, %offset : index) -> () {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -108,12 +114,14 @@ func.func @test_store_nontemporal(%base : memref<2x16x16xf32>, %offset : index)
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -164,12 +172,14 @@ func.func @testWithLoop(%base : memref<?x?x?xf32, strided<[?,?,?], offset: ?>>)
   return %sum_res2 : f32
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -204,12 +214,14 @@ func.func @test_ldmatrix(%base : memref<4x32x32xf16, 3>,
   return %loaded_val : vector<4x2xf16>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -240,12 +252,14 @@ func.func @test_ldmatrix(%base : memref<?x?x?xf16, 3>,
   return %loaded_val : vector<4x2xf16>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -277,12 +291,14 @@ func.func @test_transfer_read_op(%base : memref<?x?x?xf16>,
   return %loaded_val : vector<4x2xf16>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -307,12 +323,14 @@ func.func @test_transfer_read_op_with_tensor(%base : tensor<?x?x?xf16>,
   return %loaded_val : vector<4x2xf16>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -343,12 +361,14 @@ func.func @test_transfer_write_op(%base : memref<?x?x?xf16>,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -380,12 +400,14 @@ func.func @test_transfer_write_op_with_strides(%base : memref<?x?x?xf16, strided
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -409,11 +431,13 @@ func.func @test_transfer_write_op_with_tensor(%base : tensor<?x?x?xf16>,
   return %res : tensor<?x?x?xf16>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %0 {
-    transform.apply_patterns.memref.extract_address_computations
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %0 {
+      transform.apply_patterns.memref.extract_address_computations
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
diff --git a/mlir/test/Dialect/MemRef/make-loop-independent.mlir b/mlir/test/Dialect/MemRef/make-loop-independent.mlir
index 1a34d9ce74f246f..dca7bc1e67586ca 100644
--- a/mlir/test/Dialect/MemRef/make-loop-independent.mlir
+++ b/mlir/test/Dialect/MemRef/make-loop-independent.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s -allow-unregistered-dialect \
-// RUN:     -test-transform-dialect-interpreter -canonicalize \
+// RUN:     -transform-interpreter -canonicalize \
 // RUN:     -split-input-file -verify-diagnostics | FileCheck %s
 
 // CHECK: #[[$map:.*]] = affine_map<()[s0] -> (s0 - 1)>
@@ -35,10 +35,12 @@ func.func @make_alloca_loop_independent(%lb: index, %ub: index, %step: index) {
   }
   return
 }
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.alloca"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.memref.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.alloca"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.memref.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -67,8 +69,10 @@ func.func @make_alloca_loop_independent_static(%step: index) {
   }
   return
 }
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.alloca"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.memref.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.alloca"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.memref.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/MemRef/transform-ops.mlir b/mlir/test/Dialect/MemRef/transform-ops.mlir
index 68fea1f8402955c..338f6e3533ad903 100644
--- a/mlir/test/Dialect/MemRef/transform-ops.mlir
+++ b/mlir/test/Dialect/MemRef/transform-ops.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -verify-diagnostics -allow-unregistered-dialect -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -verify-diagnostics -allow-unregistered-dialect -split-input-file | FileCheck %s
 
 // CHECK-DAG: memref.global "private" @[[ALLOC0:alloc.*]] : memref<2x32xf32>
 // CHECK-DAG: memref.global "private" @[[ALLOC1:alloc.*]] : memref<2x32xf32>
@@ -20,13 +20,15 @@ func.func @func(%lb: index, %ub: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg0: !transform.any_op):
-  %alloca = transform.structured.match ops{["memref.alloca"]} in %arg0
-      : (!transform.any_op) -> !transform.op<"memref.alloca">
-  %get_global, %global = transform.memref.alloca_to_global %alloca
-        : (!transform.op<"memref.alloca">)
-          -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %alloca = transform.structured.match ops{["memref.alloca"]} in %arg0
+        : (!transform.any_op) -> !transform.op<"memref.alloca">
+    %get_global, %global = transform.memref.alloca_to_global %alloca
+          : (!transform.op<"memref.alloca">)
+            -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -59,12 +61,14 @@ func.func @multi_buffer(%in: memref<16xf32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
-  %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloc">) -> !transform.any_op
-  // Verify that the returned handle is usable.
-  transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
+    %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloc">) -> !transform.any_op
+    // Verify that the returned handle is usable.
+    transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -94,12 +98,14 @@ func.func @multi_buffer_on_affine_loop(%in: memref<16xf32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
-  %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloc">) -> !transform.any_op
-  // Verify that the returned handle is usable.
-  transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
+    %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloc">) -> !transform.any_op
+    // Verify that the returned handle is usable.
+    transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -132,10 +138,12 @@ func.func @multi_buffer_uses_with_no_loop_dominator(%in: memref<16xf32>, %cond:
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
-  %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloc">) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
+    %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloc">) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -167,11 +175,13 @@ func.func @multi_buffer_reject_alloca(%in: memref<16xf32>, %cond: i1) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.alloca"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloca">
-  // expected-error @below {{'transform.memref.multibuffer' op operand #0 must be Transform IR handle to memref.alloc operations, but got '!transform.op<"memref.alloca">'}}
-  %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloca">) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.alloca"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloca">
+    // expected-error @below {{'transform.memref.multibuffer' op operand #0 must be Transform IR handle to memref.alloc operations, but got '!transform.op<"memref.alloca">'}}
+    %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloca">) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -209,12 +219,14 @@ func.func @multi_buffer_one_alloc_with_use_outside_of_loop(%in: memref<16xf32>)
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
-  %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloc">) -> !transform.any_op
-  // Verify that the returned handle is usable.
-  transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
+    %1 = transform.memref.multibuffer %0 {factor = 2 : i64} : (!transform.op<"memref.alloc">) -> !transform.any_op
+    // Verify that the returned handle is usable.
+    transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -243,12 +255,14 @@ func.func @multi_buffer_no_analysis(%in: memref<16xf32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
-  %1 = transform.memref.multibuffer %0 {factor = 2 : i64, skip_analysis} : (!transform.op<"memref.alloc">) -> !transform.any_op
-  // Verify that the returned handle is usable.
-  transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
+    %1 = transform.memref.multibuffer %0 {factor = 2 : i64, skip_analysis} : (!transform.op<"memref.alloc">) -> !transform.any_op
+    // Verify that the returned handle is usable.
+    transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -280,12 +294,14 @@ func.func @multi_buffer_dealloc(%in: memref<16xf32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
-  %1 = transform.memref.multibuffer %0 {factor = 2 : i64, skip_analysis} : (!transform.op<"memref.alloc">) -> !transform.any_op
-  // Verify that the returned handle is usable.
-  transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["memref.alloc"]} in %arg1 : (!transform.any_op) -> !transform.op<"memref.alloc">
+    %1 = transform.memref.multibuffer %0 {factor = 2 : i64, skip_analysis} : (!transform.op<"memref.alloc">) -> !transform.any_op
+    // Verify that the returned handle is usable.
+    transform.test_print_remark_at_operand %1, "transformed" : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -303,10 +319,12 @@ func.func @dead_alloc() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.memref.erase_dead_alloc_and_stores %0 : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.memref.erase_dead_alloc_and_stores %0 : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
 
 // -----
@@ -326,10 +344,12 @@ func.func @store_to_load(%arg: vector<4xf32>) -> vector<4xf32> {
   return %r : vector<4xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.memref.erase_dead_alloc_and_stores %0 : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.memref.erase_dead_alloc_and_stores %0 : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
 
 // -----
@@ -342,12 +362,14 @@ func.func @lower_to_llvm() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_conversion_patterns to %0 {
-    transform.apply_conversion_patterns.dialect_to_llvm "memref"
-  } with type_converter {
-    transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
-  } {legal_dialects = ["func", "llvm"]} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_conversion_patterns to %0 {
+      transform.apply_conversion_patterns.dialect_to_llvm "memref"
+    } with type_converter {
+      transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
+    } {legal_dialects = ["func", "llvm"]} : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/NVGPU/tmaload-transform.mlir b/mlir/test/Dialect/NVGPU/tmaload-transform.mlir
index 30f8c45709bcd98..ab6483151a63f2f 100644
--- a/mlir/test/Dialect/NVGPU/tmaload-transform.mlir
+++ b/mlir/test/Dialect/NVGPU/tmaload-transform.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s \
-// RUN:     -test-transform-dialect-interpreter \
+// RUN:     -transform-interpreter \
 // RUN:     -test-transform-dialect-erase-schedule \
 // RUN: | FileCheck %s
 
@@ -18,12 +18,12 @@ func.func @main() {
   //      CHECK: %[[M1:.*]] = memref.cast %{{.*}} : memref<64x8xf32> to memref<*xf32>
   //      CHECK: %[[c64:.*]] = arith.constant 64 : index
   //      CHECK: %[[c8:.*]] = arith.constant 8 : index
-  //      CHECK: %[[D1:.*]] = nvgpu.tma.create.descriptor %[[M1]] box[%[[c64]], %[[c8]]] 
+  //      CHECK: %[[D1:.*]] = nvgpu.tma.create.descriptor %[[M1]] box[%[[c64]], %[[c8]]]
   // CHECK-SAME:   : memref<*xf32> -> <tensor = memref<64x8xf32, #gpu.address_space<workgroup>>, swizzle = none, l2promo = none, oob = zero, interleave = none>
   //      CHECK: %[[cast_2:.*]] = memref.cast %memref_0 : memref<8x128xf32> to memref<*xf32>
   //      CHECK: %[[c8_2:.*]] = arith.constant 8 : index
   //      CHECK: %[[c128_2:.*]] = arith.constant 128 : index
+  //      CHECK: %[[D2:.*]] = nvgpu.tma.create.descriptor %[[cast_2]] box[%[[c8_2]], %[[c128_2]]]
+  //      CHECK: %[[D2:.*]] = nvgpu.tma.create.descriptor %cast_2 box[%[[c8_2]], %[[c128_2]]]
   // CHECK-SAME:   : memref<*xf32> -> <tensor = memref<8x128xf32, #gpu.address_space<workgroup>>, swizzle = none, l2promo = none, oob = zero, interleave = none>
   // CHECK: gpu.launch
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
@@ -32,7 +32,7 @@ func.func @main() {
     //      CHECK: %[[G2:.*]] = memref.get_global @bufferRhsGlobal : memref<8x128xf32, #gpu.address_space<workgroup>>
     %out = memref.get_global @bufferLhsGlobal : memref<64x8xf32, #gpu.address_space<workgroup>>
     %out_1 = memref.get_global @bufferRhsGlobal : memref<8x128xf32, #gpu.address_space<workgroup>>
-    
+
     //      CHECK: %[[B:.*]] = nvgpu.mbarrier.create -> <memorySpace = #gpu.address_space<workgroup>
     //      CHECK: nvgpu.mbarrier.init %[[B]][%{{.*}}], %{{.*}} : <memorySpace = #gpu.address_space<workgroup>
     //      CHECK: gpu.barrier
@@ -44,15 +44,15 @@ func.func @main() {
     //      CHECK: scf.if %[[CMP]] {
     //
     //      CHECK:   %[[c0_7:.*]] = arith.constant 0 : index
-    //      CHECK:   nvgpu.tma.async.load %[[D1]][%[[c0_7]], %[[c0_7]]], %[[B]][%{{.*}}] to %[[G1]] 
-    // CHECK-SAME:     : <tensor = memref<64x8xf32, #gpu.address_space<workgroup>>, 
+    //      CHECK:   nvgpu.tma.async.load %[[D1]][%[[c0_7]], %[[c0_7]]], %[[B]][%{{.*}}] to %[[G1]]
+    // CHECK-SAME:     : <tensor = memref<64x8xf32, #gpu.address_space<workgroup>>,
     // CHECK-SAME:        swizzle = none, l2promo = none, oob = zero, interleave = none>, <memorySpace = #gpu.address_space<workgroup>
     // CHECK-SAME:     -> memref<64x8xf32, #gpu.address_space<workgroup>>
     //
     //      CHECK:   %[[c0_8:.*]] = arith.constant 0 : index
-    //      CHECK:   nvgpu.tma.async.load %[[D2]][%[[c0_8]], %[[c0_8]]], %[[B]][%{{.*}}] to %[[G2]] 
+    //      CHECK:   nvgpu.tma.async.load %[[D2]][%[[c0_8]], %[[c0_8]]], %[[B]][%{{.*}}] to %[[G2]]
     // CHECK-SAME:     : <tensor = memref<8x128xf32, #gpu.address_space<workgroup>>,
-    // CHECK-SAME:         swizzle = none, l2promo = none, oob = zero, interleave = none>, <memorySpace = #gpu.address_space<workgroup> 
+    // CHECK-SAME:         swizzle = none, l2promo = none, oob = zero, interleave = none>, <memorySpace = #gpu.address_space<workgroup>
     // CHECK-SAME:    -> memref<8x128xf32, #gpu.address_space<workgroup>>
     //
     //      CHECK:   %[[c6144:.*]] = arith.constant 6144 : index
@@ -66,19 +66,21 @@ func.func @main() {
     //      CHECK: %[[c10000000:.*]] = arith.constant 10000000 : index
     //      CHECK: nvgpu.mbarrier.try_wait.parity %[[B]][%{{.*}}], %[[c0_6]], %[[c10000000]] : <memorySpace = #gpu.address_space<workgroup>
 
-    /// Both copies are matched and end up in the same async group.    
+    /// Both copies are matched and end up in the same async group.
     linalg.copy ins(%memref: memref<64x8xf32>) outs(%out: memref<64x8xf32, #gpu.address_space<workgroup>>)
     linalg.copy ins(%memref_1: memref<8x128xf32>) outs(%out_1: memref<8x128xf32, #gpu.address_space<workgroup>>)
 
     gpu.terminator
   }
-  
+
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %copy = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.nvgpu.rewrite_copy_as_tma %copy  : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %copy = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.nvgpu.rewrite_copy_as_tma %copy : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/NVGPU/transform-create-async-groups.mlir b/mlir/test/Dialect/NVGPU/transform-create-async-groups.mlir
index d4f46fd06c37e3f..e1e4111adca88ce 100644
--- a/mlir/test/Dialect/NVGPU/transform-create-async-groups.mlir
+++ b/mlir/test/Dialect/NVGPU/transform-create-async-groups.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file --verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file --verify-diagnostics | FileCheck %s
 
 // Check that we produce async copies from the vector.transfer_xxx operations.
 builtin.module {
@@ -22,10 +22,12 @@ builtin.module {
     return
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%variant_op: !transform.any_op):
-    %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
-    transform.nvgpu.create_async_groups %top_level_func {bypass_l1} : (!transform.any_op) -> (!transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%variant_op: !transform.any_op {transform.readonly}) {
+      %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
+      transform.nvgpu.create_async_groups %top_level_func {bypass_l1} : (!transform.any_op) -> (!transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -54,10 +56,12 @@ builtin.module {
     return
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%variant_op: !transform.any_op):
-    %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
-    transform.nvgpu.create_async_groups %top_level_func : (!transform.any_op) -> (!transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%variant_op: !transform.any_op {transform.readonly}) {
+      %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
+      transform.nvgpu.create_async_groups %top_level_func : (!transform.any_op) -> (!transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -84,10 +88,12 @@ builtin.module {
     return
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%variant_op: !transform.any_op):
-    %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
-    transform.nvgpu.create_async_groups %top_level_func : (!transform.any_op) -> (!transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%variant_op: !transform.any_op {transform.readonly}) {
+      %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
+      transform.nvgpu.create_async_groups %top_level_func : (!transform.any_op) -> (!transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -120,10 +126,12 @@ builtin.module {
     return
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%variant_op: !transform.any_op):
-    %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
-    transform.nvgpu.create_async_groups %top_level_func : (!transform.any_op) -> (!transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%variant_op: !transform.any_op {transform.readonly}) {
+      %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
+      transform.nvgpu.create_async_groups %top_level_func : (!transform.any_op) -> (!transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -145,10 +153,12 @@ builtin.module {
     return
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%variant_op: !transform.any_op):
-    %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
-    transform.nvgpu.create_async_groups %top_level_func {bypass_l1} : (!transform.any_op) -> (!transform.any_op)
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%variant_op: !transform.any_op {transform.readonly}) {
+      %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
+      transform.nvgpu.create_async_groups %top_level_func {bypass_l1} : (!transform.any_op) -> (!transform.any_op)
+      transform.yield
+    }
   }
 }
 
@@ -184,15 +194,17 @@ builtin.module {
     return
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%variant_op: !transform.any_op):
-    %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
-    transform.apply_patterns to %top_level_func {
-      transform.apply_patterns.vector.transfer_to_scf max_transfer_rank = 1 full_unroll = true
-    } : !transform.any_op
-    transform.nvgpu.create_async_groups %top_level_func {bypass_l1} : (!transform.any_op) -> (!transform.any_op)
-    %top_level_func_2 = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
-    transform.apply_cse to %top_level_func_2 : !transform.any_op
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%variant_op: !transform.any_op {transform.readonly}) {
+      %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
+      transform.apply_patterns to %top_level_func {
+        transform.apply_patterns.vector.transfer_to_scf max_transfer_rank = 1 full_unroll = true
+      } : !transform.any_op
+      transform.nvgpu.create_async_groups %top_level_func {bypass_l1} : (!transform.any_op) -> (!transform.any_op)
+      %top_level_func_2 = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
+      transform.apply_cse to %top_level_func_2 : !transform.any_op
+      transform.yield
+    }
   }
 }
 
@@ -245,14 +257,16 @@ builtin.module {
     return
   }
 
-  transform.sequence failures(propagate) {
-  ^bb1(%variant_op: !transform.any_op):
-    %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
-    transform.apply_patterns to %top_level_func {
-      transform.apply_patterns.vector.transfer_to_scf max_transfer_rank = 1 full_unroll = true
-    } : !transform.any_op
-    transform.nvgpu.create_async_groups %top_level_func {bypass_l1} : (!transform.any_op) -> (!transform.any_op)
-    %top_level_func_2 = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
-    transform.apply_cse to %top_level_func_2 : !transform.any_op
+  module attributes {transform.with_named_sequence} {
+    transform.named_sequence @__transform_main(%variant_op: !transform.any_op {transform.readonly}) {
+      %top_level_func = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
+      transform.apply_patterns to %top_level_func {
+        transform.apply_patterns.vector.transfer_to_scf max_transfer_rank = 1 full_unroll = true
+      } : !transform.any_op
+      transform.nvgpu.create_async_groups %top_level_func {bypass_l1} : (!transform.any_op) -> (!transform.any_op)
+      %top_level_func_2 = transform.structured.match ops{["func.func"]} in %variant_op : (!transform.any_op) -> !transform.any_op
+      transform.apply_cse to %top_level_func_2 : !transform.any_op
+      transform.yield
+    }
   }
 }
diff --git a/mlir/test/Dialect/NVGPU/transform-matmul-to-nvvm.mlir b/mlir/test/Dialect/NVGPU/transform-matmul-to-nvvm.mlir
index 7125b8570262cc3..07e03f3b8473d2a 100644
--- a/mlir/test/Dialect/NVGPU/transform-matmul-to-nvvm.mlir
+++ b/mlir/test/Dialect/NVGPU/transform-matmul-to-nvvm.mlir
@@ -1,6 +1,6 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
 
-// CHECK: #[[$div4:.*]]  = affine_map<()[s0] -> (s0 floordiv 4)>                                    
+// CHECK: #[[$div4:.*]] = affine_map<()[s0] -> (s0 floordiv 4)>
 // CHECK: #[[$mod4:.*]] = affine_map<()[s0] -> (s0 mod 4)>
 // CHECK: #[[$div4p8:.*]] = affine_map<()[s0] -> (s0 floordiv 4 + 8)>
 // CHECK: #[[$map3:.*]] = affine_map<()[s0] -> (s0 * 2 - (s0 floordiv 4) * 8)>
@@ -73,12 +73,14 @@ func.func @matmul_16x8x4xf32_global(
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.nvgpu.rewrite_matmul_as_mma_sync %matmul 
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.nvgpu.rewrite_matmul_as_mma_sync %matmul
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
 
 // -----
@@ -88,13 +90,13 @@ func.func @matmul_16x8x16xf16_global(
     %A: memref<16x16xf16>, %B: memref<16x8xf16>, %C: memref<16x8xf16>) {
 
   // CHECK-COUNT-8: memref.load {{.*}} : memref<16x16xf16>
-  // CHECK-COUNT-8: vector.insert {{.*}} : f16 into vector<4x2xf16> 
+  // CHECK-COUNT-8: vector.insert {{.*}} : f16 into vector<4x2xf16>
   // CHECK-COUNT-4: memref.load {{.*}} : memref<16x8xf16>
-  // CHECK-COUNT-4: vector.insert {{.*}} : f16 into vector<2x2xf16> 
+  // CHECK-COUNT-4: vector.insert {{.*}} : f16 into vector<2x2xf16>
   // CHECK-COUNT-4: memref.load {{.*}} : memref<16x8xf16>
   // CHECK-COUNT-4: vector.insert {{.*}} : f16 into vector<2x2xf16>
   //
-  //         CHECK: nvgpu.mma.sync(%{{.*}}) {mmaShape = [16, 8, 16]} 
+  //         CHECK: nvgpu.mma.sync(%{{.*}}) {mmaShape = [16, 8, 16]}
   //    CHECK-SAME:   : (vector<4x2xf16>, vector<2x2xf16>, vector<2x2xf16>) -> vector<2x2xf16>
   //
   // CHECK-COUNT-4: vector.extract %{{.*}} : f16 from vector<2x2xf16>
@@ -104,10 +106,12 @@ func.func @matmul_16x8x16xf16_global(
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.nvgpu.rewrite_matmul_as_mma_sync %matmul 
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.nvgpu.rewrite_matmul_as_mma_sync %matmul
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/NVGPU/transform-pipeline-shared.mlir b/mlir/test/Dialect/NVGPU/transform-pipeline-shared.mlir
index 56af734b3985108..02aca49052ad173 100644
--- a/mlir/test/Dialect/NVGPU/transform-pipeline-shared.mlir
+++ b/mlir/test/Dialect/NVGPU/transform-pipeline-shared.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file --verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file --verify-diagnostics | FileCheck %s
 
 func.func @simple_depth_2_unpeeled(%global: memref<?xf32>, %result: memref<?xf32> ) {
   %c0 = arith.constant 0 : index
@@ -19,12 +19,14 @@ func.func @simple_depth_2_unpeeled(%global: memref<?xf32>, %result: memref<?xf32
 
 !t = !transform.any_op
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !t):
-  %loop = transform.structured.match ops{["scf.for"]} in %arg0 : (!t) -> !t
-  // expected-error @below {{irreversible pipelining failure}}
-  // expected-note @below {{try setting "peel_epilogue"}}
-  transform.nvgpu.pipeline_shared_memory_copies failures(propagate) %loop { depth = 2 } : (!t) -> !t
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !t {transform.readonly}) {
+    %loop = transform.structured.match ops{["scf.for"]} in %arg0 : (!t) -> !t
+    // expected-error @below {{irreversible pipelining failure}}
+    // expected-note @below {{try setting "peel_epilogue"}}
+    transform.nvgpu.pipeline_shared_memory_copies failures(propagate) %loop { depth = 2 } : (!t) -> !t
+    transform.yield
+  }
 }
 
 // -----
@@ -64,10 +66,12 @@ func.func @simple_depth_2_peeled(%global: memref<?xf32>) {
 
 !t = !transform.any_op
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !t):
-  %loop = transform.structured.match ops{["scf.for"]} in %arg0 : (!t) -> !t
-  transform.nvgpu.pipeline_shared_memory_copies failures(propagate) %loop { depth = 2, peel_epilogue } : (!t) -> !t
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !t {transform.readonly}) {
+    %loop = transform.structured.match ops{["scf.for"]} in %arg0 : (!t) -> !t
+    transform.nvgpu.pipeline_shared_memory_copies failures(propagate) %loop { depth = 2, peel_epilogue } : (!t) -> !t
+    transform.yield
+  }
 }
 
 // -----
@@ -135,10 +139,12 @@ func.func @async_depth_2_predicated(%global: memref<?xf32>) {
 
 !t = !transform.any_op
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !t):
-  %loop = transform.structured.match ops{["scf.for"]} in %arg0 : (!t) -> !t
-  transform.nvgpu.pipeline_shared_memory_copies failures(propagate) %loop { depth = 2 } : (!t) -> !t
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !t {transform.readonly}) {
+    %loop = transform.structured.match ops{["scf.for"]} in %arg0 : (!t) -> !t
+    transform.nvgpu.pipeline_shared_memory_copies failures(propagate) %loop { depth = 2 } : (!t) -> !t
+    transform.yield
+  }
 }
 
 // -----
@@ -175,8 +181,10 @@ func.func @async_depth_2_peeled(%global: memref<?xf32>) {
 
 !t = !transform.any_op
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !t):
-  %loop = transform.structured.match ops{["scf.for"]} in %arg0 : (!t) -> !t
-  transform.nvgpu.pipeline_shared_memory_copies failures(propagate) %loop { depth = 2, peel_epilogue } : (!t) -> !t
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !t {transform.readonly}) {
+    %loop = transform.structured.match ops{["scf.for"]} in %arg0 : (!t) -> !t
+    transform.nvgpu.pipeline_shared_memory_copies failures(propagate) %loop { depth = 2, peel_epilogue } : (!t) -> !t
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
index b00180c96180610..faaa2db3aa57dee 100644
--- a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
+++ b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter --cse --canonicalize -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter --cse --canonicalize -split-input-file -verify-diagnostics | FileCheck %s
 
 func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tensor<128x128xf32>) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
   %zero = arith.constant 0.0 : f32
@@ -22,16 +22,18 @@ func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tenso
   func.return %out1, %out2 : tensor<128x128xf32>, tensor<128x128xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%variant_op : !transform.any_op):
-  %matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%variant_op : !transform.any_op {transform.readonly}) {
+    %matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
 
-  %mm1, %mm2 = transform.split_handle %matched : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %mm1, %mm2 = transform.split_handle %matched : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %fused_loop = transform.loop.fuse_sibling %loop1 into %loop2 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    %fused_loop = transform.loop.fuse_sibling %loop1 into %loop2 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -48,16 +50,18 @@ func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tenso
   func.return %out1, %out2 : tensor<128x128xf32>, tensor<128x128xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%variant_op : !transform.any_op):
-  %matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%variant_op : !transform.any_op {transform.readonly}) {
+    %matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
 
-  %mm1, %mm2 = transform.split_handle %matched : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %mm1, %mm2 = transform.split_handle %matched : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %fused_loop = transform.loop.fuse_sibling %loop1 into %loop2 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    %fused_loop = transform.loop.fuse_sibling %loop1 into %loop2 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -74,16 +78,18 @@ func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tenso
   func.return %out1, %out2 : tensor<128x128xf32>, tensor<128x128xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%variant_op : !transform.any_op):
-  %matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%variant_op : !transform.any_op {transform.readonly}) {
+    %matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
 
-  %mm1, %mm2 = transform.split_handle %matched : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %mm1, %mm2 = transform.split_handle %matched : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %fused_loop = transform.loop.fuse_sibling %loop2 into %loop1 : (!transform.any_op, !transform.any_op) -> !transform.any_op
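+    // Same fusion as above but in the opposite direction: %loop2 is fused into %loop1.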
+    %fused_loop = transform.loop.fuse_sibling %loop2 into %loop1 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -100,14 +106,16 @@ func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tenso
   func.return %out1, %out2 : tensor<128x128xf32>, tensor<128x128xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%variant_op : !transform.any_op):
-  %matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%variant_op : !transform.any_op {transform.readonly}) {
+    %matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
 
-  %mm1, %mm2 = transform.split_handle %matched : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %mm1, %mm2 = transform.split_handle %matched : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-  %tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    %tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %fused_loop = transform.loop.fuse_sibling %loop2 into %loop1 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    %fused_loop = transform.loop.fuse_sibling %loop2 into %loop1 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/SCF/transform-op-coalesce.mlir b/mlir/test/Dialect/SCF/transform-op-coalesce.mlir
index 12b546516b235a7..2d59331b72cf65d 100644
--- a/mlir/test/Dialect/SCF/transform-op-coalesce.mlir
+++ b/mlir/test/Dialect/SCF/transform-op-coalesce.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics | FileCheck %s
 
 func.func @coalesce_inner() {
   %c0 = arith.constant 0 : index
@@ -22,11 +22,13 @@ func.func @coalesce_inner() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["scf.for"]} attributes {coalesce} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.cast %0 : !transform.any_op to !transform.op<"scf.for">
-  %2 = transform.loop.coalesce %1: (!transform.op<"scf.for">) -> (!transform.op<"scf.for">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
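+    // Only scf.for loops tagged with the `coalesce` unit attribute are matched.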
+    %0 = transform.structured.match ops{["scf.for"]} attributes {coalesce} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.cast %0 : !transform.any_op to !transform.op<"scf.for">
+    %2 = transform.loop.coalesce %1 : (!transform.op<"scf.for">) -> (!transform.op<"scf.for">)
+    transform.yield
+  }
 }
 
 // -----
@@ -48,11 +50,13 @@ func.func @coalesce_outer(%arg1: memref<64x64xf32, 1>, %arg2: memref<64x64xf32,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["affine.for"]} attributes {coalesce} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.cast %0 : !transform.any_op to !transform.op<"affine.for">
-  %2 = transform.loop.coalesce %1 : (!transform.op<"affine.for">) -> (!transform.op<"affine.for">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["affine.for"]} attributes {coalesce} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.cast %0 : !transform.any_op to !transform.op<"affine.for">
+    %2 = transform.loop.coalesce %1 : (!transform.op<"affine.for">) -> (!transform.op<"affine.for">)
+    transform.yield
+  }
 }
 
 // -----
@@ -83,10 +87,12 @@ func.func @coalesce_and_unroll(%arg1: memref<64x64xf32, 1>, %arg2: memref<64x64x
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["scf.for"]} attributes {coalesce} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.cast %0 : !transform.any_op to !transform.op<"scf.for">
-  %2 = transform.loop.coalesce %1 : (!transform.op<"scf.for">) -> (!transform.op<"scf.for">)
-  transform.loop.unroll %2 {factor = 3} : !transform.op<"scf.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["scf.for"]} attributes {coalesce} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.cast %0 : !transform.any_op to !transform.op<"scf.for">
+    %2 = transform.loop.coalesce %1 : (!transform.op<"scf.for">) -> (!transform.op<"scf.for">)
+    transform.loop.unroll %2 {factor = 3} : !transform.op<"scf.for">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/SCF/transform-op-forall-to-for.mlir b/mlir/test/Dialect/SCF/transform-op-forall-to-for.mlir
index 4b46c68d06d3514..f09cae988a2fb4c 100644
--- a/mlir/test/Dialect/SCF/transform-op-forall-to-for.mlir
+++ b/mlir/test/Dialect/SCF/transform-op-forall-to-for.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file --verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file --verify-diagnostics | FileCheck %s
 
 func.func private @callee(%i: index, %j: index)
 
@@ -14,10 +14,12 @@ func.func @two_iters(%ub1: index, %ub2: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["scf.forall"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  transform.loop.forall_to_for %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["scf.forall"]} in %arg0 : (!transform.any_op) -> !transform.any_op
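+    // forall_to_for yields one loop handle per induction variable of the scf.forall.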
+    transform.loop.forall_to_for %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -34,11 +36,13 @@ func.func @repeated(%ub1: index, %ub2: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["scf.forall"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{expected a single payload op}}
-  transform.loop.forall_to_for %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["scf.forall"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{expected a single payload op}}
+    transform.loop.forall_to_for %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -53,11 +57,13 @@ func.func @repeated(%ub1: index, %ub2: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["scf.forall"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{op expects as many results (1) as payload has induction variables (2)}}
-  transform.loop.forall_to_for %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["scf.forall"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{op expects as many results (1) as payload has induction variables (2)}}
+    transform.loop.forall_to_for %0 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -65,9 +71,11 @@ transform.sequence failures(propagate) {
 // expected-note @below {{payload op}}
 func.func private @callee(%i: index, %j: index)
 
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{expected the payload to be scf.forall}}
-  transform.loop.forall_to_for %0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{expected the payload to be scf.forall}}
+    transform.loop.forall_to_for %0 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/SCF/transform-op-take-assumed-branch.mlir b/mlir/test/Dialect/SCF/transform-op-take-assumed-branch.mlir
index ab6696138ceb7d5..6c2a356ed53dec7 100644
--- a/mlir/test/Dialect/SCF/transform-op-take-assumed-branch.mlir
+++ b/mlir/test/Dialect/SCF/transform-op-take-assumed-branch.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file -verify-diagnostics --allow-unregistered-dialect | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics --allow-unregistered-dialect | FileCheck %s
 
 func.func @if_no_else(%cond: i1, %a: index, %b: memref<?xf32>, %c: i8) {
   scf.if %cond {
@@ -8,13 +8,15 @@ func.func @if_no_else(%cond: i1, %a: index, %b: memref<?xf32>, %c: i8) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %if = transform.structured.match ops{["scf.if"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  // expected-error @+1 {{requires an scf.if op with a single-block `else` region}}
-  transform.scf.take_assumed_branch %if take_else_branch
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %if = transform.structured.match ops{["scf.if"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    // expected-error @+1 {{requires an scf.if op with a single-block `else` region}}
+    transform.scf.take_assumed_branch %if take_else_branch
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
 
 // -----
@@ -28,31 +30,33 @@ func.func @if_no_else(%cond: i1, %a: index, %b: memref<?xf32>, %c: i8) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %if = transform.structured.match ops{["scf.if"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  %some_op = transform.structured.match ops{["some_op"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %if = transform.structured.match ops{["scf.if"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    %some_op = transform.structured.match ops{["some_op"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+
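+    // No `take_else_branch` keyword: the "then" branch is assumed and inlined.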
+    transform.scf.take_assumed_branch %if : (!transform.any_op) -> ()
 
-  transform.scf.take_assumed_branch %if : (!transform.any_op) -> ()
-  
-  // Handle to formerly nested `some_op` is still valid after the transform.
-  transform.print %some_op: !transform.any_op
+    // Handle to formerly nested `some_op` is still valid after the transform.
+    transform.print %some_op: !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
 
 // CHECK-LABEL: tile_tensor_pad
 func.func @tile_tensor_pad(
-  %arg0 : tensor<?x?xf32>, %cst : f32, %low: index, %high: index) 
+  %arg0 : tensor<?x?xf32>, %cst : f32, %low: index, %high: index)
     -> tensor<20x40xf32>
 {
   //     CHECK: scf.forall
   // CHECK-NOT:   scf.if
   // CHECK-NOT:     tensor.generate
   // CHECK-NOT:   else
-  //     CHECK:     tensor.pad {{.*}} nofold 
+  //     CHECK:     tensor.pad {{.*}} nofold
   %0 = tensor.pad %arg0 nofold low[%low, %low] high[%high, %high] {
         ^bb0(%arg9: index, %arg10: index):
           tensor.yield %cst : f32
@@ -60,15 +64,17 @@ func.func @tile_tensor_pad(
   return %0 : tensor<20x40xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb0(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.tile_using_forall %0 tile_sizes[1, 1] 
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.structured.tile_using_forall %0 tile_sizes[1, 1]
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %if = transform.structured.match ops{["scf.if"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.scf.take_assumed_branch %if take_else_branch 
-    : (!transform.any_op) -> ()
+    %if = transform.structured.match ops{["scf.if"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.scf.take_assumed_branch %if take_else_branch
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/SCF/transform-ops-invalid.mlir b/mlir/test/Dialect/SCF/transform-ops-invalid.mlir
index e8212f500e693ad..96c57d4716d3755 100644
--- a/mlir/test/Dialect/SCF/transform-ops-invalid.mlir
+++ b/mlir/test/Dialect/SCF/transform-ops-invalid.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter --split-input-file --verify-diagnostics
+// RUN: mlir-opt %s -transform-interpreter --split-input-file --verify-diagnostics
 
 #map0 = affine_map<(d0) -> (d0 * 110)>
 #map1 = affine_map<(d0) -> (696, d0 * 110 + 110)>
@@ -10,12 +10,14 @@ func.func @test_loops_do_not_get_coalesced() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["affine.for"]} attributes {coalesce} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.cast %0 : !transform.any_op to !transform.op<"affine.for">
-  // expected-error @below {{failed to coalesce}}
-  %2 = transform.loop.coalesce %1: (!transform.op<"affine.for">) -> (!transform.op<"affine.for">)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["affine.for"]} attributes {coalesce} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.cast %0 : !transform.any_op to !transform.op<"affine.for">
+    // expected-error @below {{failed to coalesce}}
+    %2 = transform.loop.coalesce %1 : (!transform.op<"affine.for">) -> (!transform.op<"affine.for">)
+    transform.yield
+  }
 }
 
 // -----
@@ -27,12 +29,14 @@ func.func @test_loops_do_not_get_unrolled() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.loop.get_parent_for %0 { affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
-  // expected-error @below {{failed to unroll}}
-  transform.loop.unroll %1 { factor = 8 } : !transform.op<"affine.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.loop.get_parent_for %0 { affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
+    // expected-error @below {{failed to unroll}}
+    transform.loop.unroll %1 { factor = 8 } : !transform.op<"affine.for">
+    transform.yield
+  }
 }
 
 // -----
@@ -53,11 +57,13 @@ func.func @loop_outline_op_multi_region() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["scf.while"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{failed to outline}}
-  transform.loop.outline %0 {func_name = "foo"} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["scf.while"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{failed to outline}}
+    transform.loop.outline %0 {func_name = "foo"} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -72,10 +78,12 @@ func.func @test_loops_do_not_get_peeled() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
-  // expected-error @below {{failed to peel}}
-  transform.loop.peel %1 : (!transform.op<"scf.for">) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
+    // expected-error @below {{failed to peel}}
+    transform.loop.peel %1 : (!transform.op<"scf.for">) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/SCF/transform-ops.mlir b/mlir/test/Dialect/SCF/transform-ops.mlir
index 043998711f64e5f..6d1ba48d3b935bb 100644
--- a/mlir/test/Dialect/SCF/transform-ops.mlir
+++ b/mlir/test/Dialect/SCF/transform-ops.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics | FileCheck %s
 
 // CHECK-LABEL: @get_parent_for_op
 func.func @get_parent_for_op(%arg0: index, %arg1: index, %arg2: index) {
@@ -15,16 +15,18 @@ func.func @get_parent_for_op(%arg0: index, %arg1: index, %arg2: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // CHECK: = transform.loop.get_parent_for
-  %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
-  %2 = transform.loop.get_parent_for %0 { num_loops = 2 } : (!transform.any_op) -> !transform.op<"scf.for">
-  %3 = transform.loop.get_parent_for %0 { num_loops = 3 } : (!transform.any_op) -> !transform.op<"scf.for">
-  transform.test_print_remark_at_operand %1, "third loop" : !transform.op<"scf.for">
-  transform.test_print_remark_at_operand %2, "second loop" : !transform.op<"scf.for">
-  transform.test_print_remark_at_operand %3, "first loop" : !transform.op<"scf.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
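+    // Larger num_loops values walk further up the nest: %1 is the innermost enclosing loop, %3 the outermost.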
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // CHECK: = transform.loop.get_parent_for
+    %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
+    %2 = transform.loop.get_parent_for %0 { num_loops = 2 } : (!transform.any_op) -> !transform.op<"scf.for">
+    %3 = transform.loop.get_parent_for %0 { num_loops = 3 } : (!transform.any_op) -> !transform.op<"scf.for">
+    transform.test_print_remark_at_operand %1, "third loop" : !transform.op<"scf.for">
+    transform.test_print_remark_at_operand %2, "second loop" : !transform.op<"scf.for">
+    transform.test_print_remark_at_operand %3, "first loop" : !transform.op<"scf.for">
+    transform.yield
+  }
 }
 
 // -----
@@ -35,11 +37,13 @@ func.func @get_parent_for_op_no_loop(%arg0: index, %arg1: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{could not find an 'scf.for' parent}}
-  %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{could not find an 'scf.for' parent}}
+    %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
+    transform.yield
+  }
 }
 
 // -----
@@ -74,12 +78,14 @@ func.func @loop_outline_op(%arg0: index, %arg1: index, %arg2: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.loop.get_parent_for %0  : (!transform.any_op) -> !transform.op<"scf.for">
-  // CHECK: = transform.loop.outline %{{.*}}
-  transform.loop.outline %1 {func_name = "foo"} : (!transform.op<"scf.for">) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.loop.get_parent_for %0  : (!transform.any_op) -> !transform.op<"scf.for">
+    // CHECK: = transform.loop.outline %{{.*}}
+    transform.loop.outline %1 {func_name = "foo"} : (!transform.op<"scf.for">) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -105,14 +111,16 @@ func.func @loop_peel_op() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
-  %main_loop, %remainder = transform.loop.peel %1 : (!transform.op<"scf.for">) -> (!transform.op<"scf.for">, !transform.op<"scf.for">)
-  // Make sure 
-  transform.test_print_remark_at_operand %main_loop, "main loop" : !transform.op<"scf.for">
-  transform.test_print_remark_at_operand %remainder, "remainder loop" : !transform.op<"scf.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
+    %main_loop, %remainder = transform.loop.peel %1 : (!transform.op<"scf.for">) -> (!transform.op<"scf.for">, !transform.op<"scf.for">)
+    // Make sure the resulting loop handles are usable.
+    transform.test_print_remark_at_operand %main_loop, "main loop" : !transform.op<"scf.for">
+    transform.test_print_remark_at_operand %remainder, "remainder loop" : !transform.op<"scf.for">
+    transform.yield
+  }
 }
 
 // -----
@@ -141,13 +149,15 @@ func.func @loop_pipeline_op(%A: memref<?xf32>, %result: memref<?xf32>) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
-  %2 = transform.loop.pipeline %1 : (!transform.op<"scf.for">) -> !transform.any_op
-  // Verify that the returned handle is usable.
-  transform.test_print_remark_at_operand %2, "transformed" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
+    %2 = transform.loop.pipeline %1 : (!transform.op<"scf.for">) -> !transform.any_op
+    // Verify that the returned handle is usable.
+    transform.test_print_remark_at_operand %2, "transformed" : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -165,11 +175,13 @@ func.func @loop_unroll_op() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
-  transform.loop.unroll %1 { factor = 4 } : !transform.op<"scf.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.loop.get_parent_for %0 : (!transform.any_op) -> !transform.op<"scf.for">
+    transform.loop.unroll %1 { factor = 4 } : !transform.op<"scf.for">
+    transform.yield
+  }
 }
 
 // -----
@@ -189,16 +201,18 @@ func.func @get_parent_for_op(%arg0: index, %arg1: index, %arg2: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // CHECK: = transform.loop.get_parent_for
-  %1 = transform.loop.get_parent_for %0 { affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
-  %2 = transform.loop.get_parent_for %0 { num_loops = 2, affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
-  %3 = transform.loop.get_parent_for %0 { num_loops = 3, affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
-  transform.test_print_remark_at_operand %1, "third loop" : !transform.op<"affine.for">
-  transform.test_print_remark_at_operand %2, "second loop" : !transform.op<"affine.for">
-  transform.test_print_remark_at_operand %3, "first loop" : !transform.op<"affine.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // CHECK: = transform.loop.get_parent_for
+    %1 = transform.loop.get_parent_for %0 { affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
+    %2 = transform.loop.get_parent_for %0 { num_loops = 2, affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
+    %3 = transform.loop.get_parent_for %0 { num_loops = 3, affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
+    transform.test_print_remark_at_operand %1, "third loop" : !transform.op<"affine.for">
+    transform.test_print_remark_at_operand %2, "second loop" : !transform.op<"affine.for">
+    transform.test_print_remark_at_operand %3, "first loop" : !transform.op<"affine.for">
+    transform.yield
+  }
 }
 
 // -----
@@ -209,11 +223,13 @@ func.func @get_parent_for_op_no_loop(%arg0: index, %arg1: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{could not find an 'affine.for' parent}}
-  %1 = transform.loop.get_parent_for %0 { affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{could not find an 'affine.for' parent}}
+    %1 = transform.loop.get_parent_for %0 { affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
+    transform.yield
+  }
 }
 
 // -----
@@ -231,12 +247,14 @@ func.func @loop_unroll_op() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.loop.get_parent_for %0 { affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
-  transform.test_print_remark_at_operand %1, "affine for loop" : !transform.op<"affine.for">
-  transform.loop.unroll %1 { factor = 4, affine = true } : !transform.op<"affine.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.loop.get_parent_for %0 { affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
+    transform.test_print_remark_at_operand %1, "affine for loop" : !transform.op<"affine.for">
+    transform.loop.unroll %1 { factor = 4, affine = true } : !transform.op<"affine.for">
+    transform.yield
+  }
 }
 
 // -----
@@ -256,12 +274,14 @@ func.func @test_mixed_loops() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.loop.get_parent_for %0 { num_loops = 1, affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
-  transform.test_print_remark_at_operand %1, "affine for loop" : !transform.op<"affine.for">
-  transform.loop.unroll %1 { factor = 4 } : !transform.op<"affine.for">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["arith.addi"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.loop.get_parent_for %0 { num_loops = 1, affine = true } : (!transform.any_op) -> !transform.op<"affine.for">
+    transform.test_print_remark_at_operand %1, "affine for loop" : !transform.op<"affine.for">
+    transform.loop.unroll %1 { factor = 4 } : !transform.op<"affine.for">
+    transform.yield
+  }
 }
 
 // -----
@@ -280,10 +300,12 @@ func.func @test_promote_if_one_iteration(%a: index) -> index {
   return %0 : index
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["scf.for"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.loop.promote_if_one_iteration %0 : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["scf.for"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.loop.promote_if_one_iteration %0 : !transform.any_op
+    transform.yield
+  }
 }
 
 
@@ -303,12 +325,14 @@ func.func @test_structural_conversion_patterns(%a: tensor<f32>) -> tensor<f32> {
   return %0 : tensor<f32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_conversion_patterns to %0 {
-    transform.apply_conversion_patterns.scf.structural_conversions
-  } with type_converter {
-    transform.apply_conversion_patterns.transform.test_type_converter
-  } {  partial_conversion  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_conversion_patterns to %0 {
+      transform.apply_conversion_patterns.scf.structural_conversions
+    } with type_converter {
+      transform.apply_conversion_patterns.transform.test_type_converter
+    } {  partial_conversion  } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/SparseTensor/transform-ops.mlir b/mlir/test/Dialect/SparseTensor/transform-ops.mlir
index 4a9060b5c10cf00..b186f518fbf626a 100644
--- a/mlir/test/Dialect/SparseTensor/transform-ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/transform-ops.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --verify-diagnostics --split-input-file
+// RUN: mlir-opt %s --transform-interpreter --verify-diagnostics --split-input-file
 
 module attributes { transform.with_named_sequence } {
   transform.named_sequence @match_sparse_structured(%arg0: !transform.any_op {transform.readonly}) -> !transform.any_op {
@@ -17,11 +17,11 @@ module attributes { transform.with_named_sequence } {
   }
 
   // Entry point. Match any structured sparse operation and emit a remark.
-  transform.sequence failures(propagate) attributes { transform.target_tag = "transform" } {
-  ^bb0(%arg0: !transform.any_op):
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.consumed}) {
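+    // foreach_match consumes the root handle, hence the transform.consumed annotation.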
     transform.foreach_match in %arg0
         @match_sparse_structured -> @print_sparse_structured
         : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 }
 
diff --git a/mlir/test/Dialect/Tensor/fold-empty-op.mlir b/mlir/test/Dialect/Tensor/fold-empty-op.mlir
index 70788e9ec72e609..057e105f3b572c3 100644
--- a/mlir/test/Dialect/Tensor/fold-empty-op.mlir
+++ b/mlir/test/Dialect/Tensor/fold-empty-op.mlir
@@ -1,10 +1,12 @@
-// RUN: mlir-opt -split-input-file -test-transform-dialect-interpreter %s | FileCheck %s
+// RUN: mlir-opt -split-input-file -transform-interpreter %s | FileCheck %s
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.tensor.fold_tensor_empty
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.tensor.fold_tensor_empty
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 floordiv 28)>
@@ -64,12 +66,14 @@ func.func @rank_reducing_empty_tensor_extract(%sz : index, %idx : index) -> tens
 
 // -----
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.tensor.fold_tensor_empty
-        {fold_single_use_only = true}
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
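+    // With fold_single_use_only, tensor.empty ops with more than one use are left intact.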
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.tensor.fold_tensor_empty
+          {fold_single_use_only = true}
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 func.func @double_use_of_tensor_empty(%arg0: index, %arg1: index)
diff --git a/mlir/test/Dialect/Tensor/fold-tensor-subset-ops-into-vector-transfers.mlir b/mlir/test/Dialect/Tensor/fold-tensor-subset-ops-into-vector-transfers.mlir
index e335277ccf18a1a..505abc8f3533595 100644
--- a/mlir/test/Dialect/Tensor/fold-tensor-subset-ops-into-vector-transfers.mlir
+++ b/mlir/test/Dialect/Tensor/fold-tensor-subset-ops-into-vector-transfers.mlir
@@ -1,10 +1,12 @@
-// RUN: mlir-opt -split-input-file -test-transform-dialect-interpreter %s | FileCheck %s
+// RUN: mlir-opt -split-input-file -transform-interpreter %s | FileCheck %s
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.tensor.fold_tensor_subset_ops_into_vector_transfers
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.tensor.fold_tensor_subset_ops_into_vector_transfers
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // CHECK: #[[$map:.*]] = affine_map<()[s0] -> (s0 + 4)>
diff --git a/mlir/test/Dialect/Tensor/rewrite-as-constant.mlir b/mlir/test/Dialect/Tensor/rewrite-as-constant.mlir
index 7037d7b119f6670..d68a6bd25286d71 100644
--- a/mlir/test/Dialect/Tensor/rewrite-as-constant.mlir
+++ b/mlir/test/Dialect/Tensor/rewrite-as-constant.mlir
@@ -1,10 +1,12 @@
-// RUN: mlir-opt -split-input-file -test-transform-dialect-interpreter %s | FileCheck %s
+// RUN: mlir-opt -split-input-file -transform-interpreter %s | FileCheck %s
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.tensor.rewrite_as_constant
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.tensor.rewrite_as_constant
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @tensor_generate_constant(
diff --git a/mlir/test/Dialect/Tensor/tiling.mlir b/mlir/test/Dialect/Tensor/tiling.mlir
index 556718785f93a82..51f33a96e571b83 100644
--- a/mlir/test/Dialect/Tensor/tiling.mlir
+++ b/mlir/test/Dialect/Tensor/tiling.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -canonicalize -cse -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -canonicalize -cse -split-input-file | FileCheck %s
 
 //  CHECK-DAG:  #[[MAP0:.*]] = affine_map<()[s0] -> (s0 + 8)>
 //  CHECK-DAG:  #[[MAP1:.*]] = affine_map<()[s0] -> (s0 + 7)>
@@ -31,10 +31,12 @@ func.func @dynamic_pad_tensor_3_4(%input_tensor: tensor<?x?xf32>,
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -68,10 +70,12 @@ func.func @dynamic_pad_tensor_0_3(%input_tensor: tensor<?x?xf32>,
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loop = transform.structured.tile_using_for %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
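+    // A zero tile size leaves that dimension untiled, so a single loop is produced.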
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loop = transform.structured.tile_using_for %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -102,10 +106,12 @@ func.func @static_pad_tensor_3_4(%input_tensor: tensor<7x9xf32>,
   return %0 : tensor<15x16xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -134,10 +140,12 @@ func.func @static_pad_tensor_0_3(%input_tensor: tensor<7x9xf32>,
   return %0 : tensor<15x16xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loop = transform.structured.tile_using_for %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loop = transform.structured.tile_using_for %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -172,10 +180,12 @@ func.func @static_pad_tile_evenly_0_3(%input_tensor: tensor<7x9xf32>,
   return %0 : tensor<14x15xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loop = transform.structured.tile_using_for %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loop = transform.structured.tile_using_for %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -208,10 +218,12 @@ func.func @NC_to_NCnc(%arg0: tensor<128x256xf32>, %arg1: tensor<4x8x32x32xf32>)
   return %0 : tensor<4x8x32x32xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -236,10 +248,12 @@ func.func @KC_to_CKkc(%arg0: tensor<128x256xf32>, %arg1: tensor<32x4x32x8xf32>)
   return %0 : tensor<32x4x32x8xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -271,10 +285,12 @@ func.func @pad_and_pack_static(%input: tensor<13x15xf32>, %output: tensor<2x8x8x
   return %0 : tensor<2x8x8x2xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -320,10 +336,12 @@ func.func @pad_and_pack_partially_dynamic(%input: tensor<?x?xf32>, %output: tens
   return %0 : tensor<?x?x8x2xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -374,10 +392,12 @@ func.func @pad_and_pack_fully_dynamic(%source: tensor<?x?xf32>, %dest: tensor<?x
   return %0 : tensor<?x?x?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -421,10 +441,12 @@ func.func @NCnc_to_NC(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>
   return %0 : tensor<256x128xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -467,10 +489,12 @@ func.func @CKkc_to_KC(%source: tensor<32x4x32x8xf32>, %dest: tensor<128x256xf32>
   return %0 : tensor<128x256xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -503,10 +527,12 @@ func.func @perfect_CKkc_to_KC(%source: tensor<32x4x2x4xf32>, %dest: tensor<8x128
   return %0 : tensor<8x128xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -545,10 +571,12 @@ func.func @dynamic_perfect_CKkc_to_KC(%source: tensor<?x?x2x2xf32>, %dest: tenso
   return %0 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -580,10 +608,12 @@ func.func @perfect_NKPQk_to_NPQK(%source: tensor<1x4x6x6x2xf32>, %dest: tensor<1
   return %0 : tensor<1x6x6x8xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:4 = transform.structured.tile_using_for %0 [1, 1, 1, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:4 = transform.structured.tile_using_for %0 [1, 1, 1, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -606,10 +636,12 @@ func.func @fully_dynamic_unpack(%source: tensor<?x?x?x?xf32>, %dest: tensor<?x?x
   return %1 : tensor<?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:2 = transform.structured.tile_using_for %0 [4, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:2 = transform.structured.tile_using_for %0 [4, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 // -----
@@ -640,8 +672,10 @@ func.func @perfect_NPQK_to_NKPQk(%source: tensor<1x6x6x8xf32>, %dest: tensor<1x4
   return %0 : tensor<1x4x6x6x2xf32>
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
-    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    %1, %loops:4 = transform.structured.tile_using_for %0 [1, 1, 1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1, %loops:4 = transform.structured.tile_using_for %0 [1, 1, 1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Tensor/transform-op-make-loop-independent.mlir b/mlir/test/Dialect/Tensor/transform-op-make-loop-independent.mlir
index d379888c14680a6..382dd25914b790d 100644
--- a/mlir/test/Dialect/Tensor/transform-op-make-loop-independent.mlir
+++ b/mlir/test/Dialect/Tensor/transform-op-make-loop-independent.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s -allow-unregistered-dialect \
-// RUN:     -test-transform-dialect-interpreter -canonicalize \
+// RUN:     -transform-interpreter -canonicalize \
 // RUN:     -split-input-file -verify-diagnostics | FileCheck %s
 
 // This is a test case where "high" padding depends on the IV.
@@ -29,10 +29,12 @@ func.func @make_pad_loop_independent_1(%lb: index, %ub: index, %step: index,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.tensor.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.tensor.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -66,10 +68,12 @@ func.func @make_pad_loop_independent_1(%lb: index, %ub: index, %step: index,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.tensor.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.tensor.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -92,10 +96,12 @@ func.func @two_loops(%lb: index, %ub: index, %step: index,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.tensor.make_loop_independent %0 {num_loops = 2} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.tensor.make_loop_independent %0 {num_loops = 2} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -116,11 +122,13 @@ func.func @not_enough_loops(%lb: index, %ub: index, %step: index,
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  // expected-error @below {{could not find 2-th enclosing loop}}
-  %1 = transform.tensor.make_loop_independent %0 {num_loops = 3} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{could not find 2-th enclosing loop}}
+    %1 = transform.tensor.make_loop_independent %0 {num_loops = 3} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -144,8 +152,10 @@ func.func @make_empty_loop_independent(%lb: index, %ub: index, %step: index) {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.empty"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.tensor.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.empty"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.tensor.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/transform-op-vector-to-llvm.mlir b/mlir/test/Dialect/Vector/transform-op-vector-to-llvm.mlir
index 73eb45470ab798d..271cdf0e059f4c3 100644
--- a/mlir/test/Dialect/Vector/transform-op-vector-to-llvm.mlir
+++ b/mlir/test/Dialect/Vector/transform-op-vector-to-llvm.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -verify-diagnostics -allow-unregistered-dialect -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -verify-diagnostics -allow-unregistered-dialect -split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @lower_to_llvm
 //   CHECK-NOT:   vector.bitcast
@@ -8,12 +8,14 @@ func.func @lower_to_llvm(%input: vector<f32>) -> vector<i32> {
   return %0 : vector<i32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.apply_conversion_patterns to %0 {
-    transform.apply_conversion_patterns.vector.vector_to_llvm
-  } with type_converter {
-    transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
-  } {legal_dialects = ["func", "llvm"]} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.apply_conversion_patterns to %0 {
+      transform.apply_conversion_patterns.vector.vector_to_llvm
+    } with type_converter {
+      transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
+    } {legal_dialects = ["func", "llvm"]} : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/transform-vector.mlir b/mlir/test/Dialect/Vector/transform-vector.mlir
index 0c1b0e97f1fce52..a0ca8c2fa9b6a37 100644
--- a/mlir/test/Dialect/Vector/transform-vector.mlir
+++ b/mlir/test/Dialect/Vector/transform-vector.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @matmul_tensors
 func.func @matmul_tensors(
@@ -13,54 +13,56 @@ func.func @matmul_tensors(
   return %0 : tensor<8x32xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %module_op : (!transform.any_op) -> !transform.any_op
-  %1, %loops:3 = transform.structured.tile_using_for %0 [8, 4, 2]
-    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-  %2 = get_parent_op %1 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize_children_and_apply_patterns %2 : (!transform.any_op) -> !transform.any_op
-  %b = transform.bufferization.one_shot_bufferize
-      layout{IdentityLayoutMap} %module_op
-      {bufferize_function_boundaries = true, allow_return_allocs = true}
-      : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.consumed}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %module_op : (!transform.any_op) -> !transform.any_op
+    %1, %loops:3 = transform.structured.tile_using_for %0 [8, 4, 2]
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    %2 = transform.get_parent_op %1 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize_children_and_apply_patterns %2 : (!transform.any_op) -> !transform.any_op
+    %b = transform.bufferization.one_shot_bufferize
+        layout{IdentityLayoutMap} %module_op
+        {bufferize_function_boundaries = true, allow_return_allocs = true}
+        : (!transform.any_op) -> !transform.any_op
 
-  %f = transform.structured.match ops{["func.func"]} in %b
-    : (!transform.any_op) -> !transform.any_op
+    %f = transform.structured.match ops{["func.func"]} in %b
+      : (!transform.any_op) -> !transform.any_op
 
-  // TODO: group these lower-level controls into various properly named vector
-  // lowering TD macros.
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
-  } : !transform.any_op
+    // TODO: group these lower-level controls into various properly named vector
+    // lowering TD macros.
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
+    } : !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.transfer_permutation_patterns
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.transfer_permutation_patterns
+    } : !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerparallel"
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerparallel"
+    } : !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
+    } : !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.transfer_to_scf max_transfer_rank = 1 full_unroll = true
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.transfer_to_scf max_transfer_rank = 1 full_unroll = true
+    } : !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_transfer max_transfer_rank = 1
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_transfer max_transfer_rank = 1
+    } : !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_shape_cast
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_shape_cast
+    } : !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_1d"
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_1d"
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -81,10 +83,12 @@ func.func @fold_arith_extf_into_contract(%arg0: vector<64x64xf16>, %arg1: vector
     return %result : vector<64x64xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %func = transform.structured.match ops{["func.func"]} in %module_op : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %func {
-    transform.apply_patterns.vector.fold_arith_extension
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %func = transform.structured.match ops{["func.func"]} in %module_op : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %func {
+      transform.apply_patterns.vector.fold_arith_extension
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir
index c537ad7753dfb3a..8774a7513c9a24a 100644
--- a/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @broadcast_vec1d_from_scalar
 // CHECK-SAME: %[[A:.*0]]: f32
@@ -173,12 +173,14 @@ func.func @broadcast_scalable_duplication(%arg0: vector<[32]xf32>) -> vector<1x[
   return %res : vector<1x[32]xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_broadcast
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_broadcast
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-contract-matvec-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-matvec-transforms.mlir
index 0314639ffdcf9c0..cfcb14a477b6b71 100644
--- a/mlir/test/Dialect/Vector/vector-contract-matvec-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-matvec-transforms.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 #matvec_accesses = [
   affine_map<(i, j) -> (i, j)>,
@@ -208,9 +208,11 @@ func.func @redpar_vecmattrans2x2(%arg0: memref<vector<2x2xf32>>, %arg1: memref<v
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-contract-to-dot-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-to-dot-transforms.mlir
index 0aee0272c9570a1..0ba185bb8476095 100644
--- a/mlir/test/Dialect/Vector/vector-contract-to-dot-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-to-dot-transforms.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 #dotp_accesses = [
   affine_map<(i) -> (i)>,
@@ -295,12 +295,14 @@ func.func @contract_one_sided_unit_reduction_dim(%arg0 : vector<1x2xi32>, %arg1
   return %res : vector<2xi32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_contraction lowering_strategy = "dot"
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_contraction lowering_strategy = "dot"
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir
index 84bef86b366aa52..3c5539d1aea6edc 100644
--- a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 #matmat_accesses = [
   affine_map<(i, j, k) -> (i, k)>,
@@ -43,16 +43,18 @@ func.func @matmul(%arg0: vector<2x4xf32>,
   return %0 : vector<2x3xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_contraction lowering_strategy = "matmulintrinsics"
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_contraction lowering_strategy = "matmulintrinsics"
+    } : !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_shape_cast
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_shape_cast
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-transforms-unsupported.mlir b/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-transforms-unsupported.mlir
index a955250107d73d7..954aa13c3e77b37 100644
--- a/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-transforms-unsupported.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-transforms-unsupported.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics
+// RUN: mlir-opt %s --transform-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics
 
 #matvec_accesses = [
   affine_map<(i, j) -> (i, j)>,
@@ -22,12 +22,14 @@ func.func @masked_extract_contract2_scalable_reduction_dim(%arg0: vector<[2]x[3]
   return %0 : vector<[2]xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-transforms.mlir
index 1e92fcff64dea57..d5a7db8709739fd 100644
--- a/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-transforms.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 #matvec_accesses = [
   affine_map<(i, j) -> (i, j)>,
@@ -534,12 +534,14 @@ func.func @masked_tmatvec_k_km_m(%arg0: vector<2x4xf32>, %arg1: vector<2xf32>, %
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-contract-to-parallel-arith-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-to-parallel-arith-transforms.mlir
index 5445829c1dbb542..e93c5a08bdc7c91 100644
--- a/mlir/test/Dialect/Vector/vector-contract-to-parallel-arith-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-to-parallel-arith-transforms.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @parallel_contract_lowering
 //       CHECK:   %[[E0:.*]] = vector.extract %{{.*}}[0, 0] : vector<4xf32> from vector<1x1x4xf32>
@@ -51,12 +51,14 @@ func.func @parallel_contract_lowering_scalar(%arg0: vector<1x1xf32>, %arg1: vect
   return %0 : f32
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_contraction lowering_strategy = "parallelarith"
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_contraction lowering_strategy = "parallelarith"
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-mask-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-mask-lowering-transforms.mlir
index d425e9c9ab1a6c9..f74af8eda0e6f4a 100644
--- a/mlir/test/Dialect/Vector/vector-mask-lowering-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-mask-lowering-transforms.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @genbool_1d
 // CHECK: %[[T0:.*]] = arith.constant dense<[true, true, true, true, false, false, false, false]> : vector<8xi1>
@@ -91,14 +91,16 @@ func.func @genbool_var_3d(%arg0: index, %arg1: index, %arg2: index) -> vector<2x
   return %0 : vector<2x1x7xi1>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_masks
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_masks
+    } : !transform.any_op
+    transform.yield
+  }
 }
 
 // -----
@@ -114,20 +116,22 @@ func.func @transfer_read_3d(
   //      CHECK: vector.transfer_read {{.*}}, %[[mask]] {in_bounds = [true, true, true]}
   // CHECK-SAME:   : tensor<?x?x?xf32>, vector<2x1x7xf32>
   %0 = vector.create_mask %arg0, %arg1, %arg2 : vector<2x1x7xi1>
-  %1 = vector.mask %0 { 
+  %1 = vector.mask %0 {
     vector.transfer_read %t[%c0, %c0, %c0], %f0 {in_bounds = [true, true, true]}
-      : tensor<?x?x?xf32>, vector<2x1x7xf32> 
+      : tensor<?x?x?xf32>, vector<2x1x7xf32>
   } : vector<2x1x7xi1> -> vector<2x1x7xf32>
 
   return %1: vector<2x1x7xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_masked_transfers
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_masked_transfers
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-materialize-mask.mlir b/mlir/test/Dialect/Vector/vector-materialize-mask.mlir
index 3d3d643168cdb46..c47d91bb6ed921d 100644
--- a/mlir/test/Dialect/Vector/vector-materialize-mask.mlir
+++ b/mlir/test/Dialect/Vector/vector-materialize-mask.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 func.func @select_single_i1_vector(%cond : i1) -> vector<1xi1> {
   %true = arith.constant dense<true> : vector<1xi1>
@@ -7,11 +7,13 @@ func.func @select_single_i1_vector(%cond : i1) -> vector<1xi1> {
   return %select : vector<1xi1>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.materialize_masks
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.materialize_masks
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // CHECK-LABEL: func @select_single_i1_vector
diff --git a/mlir/test/Dialect/Vector/vector-multi-reduction-lowering.mlir b/mlir/test/Dialect/Vector/vector-multi-reduction-lowering.mlir
index 813e61cf562f404..17cdda65fb2ca9c 100644
--- a/mlir/test/Dialect/Vector/vector-multi-reduction-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-multi-reduction-lowering.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter | FileCheck %s
 
 func.func @vector_multi_reduction(%arg0: vector<2x4xf32>, %acc: vector<2xf32>) -> vector<2xf32> {
     %0 = vector.multi_reduction <mul>, %arg0, %acc [1] : vector<2x4xf32> to vector<2xf32>
@@ -281,9 +281,11 @@ func.func private @scalable_dims(%A : vector<8x[4]x2xf32>, %B: vector<8x[4]xf32>
 // CHECK:           %[[VAL_163:.*]] = vector.shape_cast %[[VAL_162]] : vector<[32]xf32> to vector<8x[4]xf32>
 // CHECK:           return %[[VAL_163]] : vector<8x[4]xf32>
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerreduction"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerreduction"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-multi-reduction-outer-lowering.mlir b/mlir/test/Dialect/Vector/vector-multi-reduction-outer-lowering.mlir
index e01171b83572607..12ea87ffb1413f4 100644
--- a/mlir/test/Dialect/Vector/vector-multi-reduction-outer-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-multi-reduction-outer-lowering.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter | FileCheck %s
 
 func.func @vector_multi_reduction(%arg0: vector<2x4xf32>, %acc: vector<2xf32>) -> vector<2xf32> {
     %0 = vector.multi_reduction <mul>, %arg0, %acc [1] : vector<2x4xf32> to vector<2xf32>
@@ -188,9 +188,11 @@ func.func @vector_multi_reduction_to_scalar(%arg0: vector<2x3xf32>, %acc: f32) -
 // CHECK-LABEL: func @vector_multi_reduction_to_scalar
 //       CHECK:   return %{{.+}}
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerparallel"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerparallel"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-outerproduct-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-outerproduct-lowering-transforms.mlir
index baa065aa765d90d..059d955f773131e 100644
--- a/mlir/test/Dialect/Vector/vector-outerproduct-lowering-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-outerproduct-lowering-transforms.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @outerproduct_noacc
 // CHECK-SAME: %[[A:.*0]]: vector<2xf32>,
@@ -135,16 +135,18 @@ func.func @axpy_int_add(%arg0: vector<16xi32>, %arg1: i32, %arg2: vector<16xi32>
   return %0: vector<16xi32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op 
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_outerproduct
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_outerproduct
+    } : !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_broadcast
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_broadcast
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir b/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir
index 7754c70458d32b5..a600fa955b17003 100644
--- a/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir
+++ b/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir
@@ -1,8 +1,8 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 /// Note: Inspect generated assembly and llvm-mca stats:
 /// ====================================================
-/// mlir-opt --test-transform-dialect-interpreter mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir -test-transform-dialect-erase-schedule -test-lower-to-llvm | mlir-translate -mlir-to-llvmir | llc -o - -mcpu=skylake-avx512 --function-sections -filetype=obj > /tmp/a.out; objdump -d --disassemble=f1 --no-addresses --no-show-raw-insn -M att /tmp/a.out | ./build/bin/llvm-mca -mcpu=skylake-avx512
+/// mlir-opt --transform-interpreter mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir -test-transform-dialect-erase-schedule -test-lower-to-llvm | mlir-translate -mlir-to-llvmir | llc -o - -mcpu=skylake-avx512 --function-sections -filetype=obj > /tmp/a.out; objdump -d --disassemble=f1 --no-addresses --no-show-raw-insn -M att /tmp/a.out | ./build/bin/llvm-mca -mcpu=skylake-avx512
 
 // CHECK-LABEL: func.func @f1(
 //  CHECK-SAME: %[[A:[0-9a-z]*]]: vector<32xi64>) -> vector<20xi8>
@@ -19,23 +19,23 @@ func.func @f1(%a: vector<32xi64>) -> vector<20xi8> {
   // { 6: b@[2..5) lshl: 0 } { 7: b@[0..5) lshl: 3 }
   // { 8: b@[0..5) lshl: 0 } { 9: b@[0..3) lshl: 5 }
   // { 9: b@[3..5) lshl: 0 } { 10: b@[0..5) lshl: 2 } { 11: b@[0..1) lshl: 7 }
-  // { 11: b@[1..5) lshl: 0 } { 12: b@[0..4) lshl: 4 }                      
+  // { 11: b@[1..5) lshl: 0 } { 12: b@[0..4) lshl: 4 }
   // { 12: b@[4..5) lshl: 0 } { 13: b@[0..5) lshl: 1 } { 14: b@[0..2) lshl: 6 }
-  // { 14: b@[2..5) lshl: 0 } { 15: b@[0..5) lshl: 3 }                      
-  // { 16: b@[0..5) lshl: 0 } { 17: b@[0..3) lshl: 5 }                      
+  // { 14: b@[2..5) lshl: 0 } { 15: b@[0..5) lshl: 3 }
+  // { 16: b@[0..5) lshl: 0 } { 17: b@[0..3) lshl: 5 }
   // { 17: b@[3..5) lshl: 0 } { 18: b@[0..5) lshl: 2 } { 19: b@[0..1) lshl: 7 }
-  // { 19: b@[1..5) lshl: 0 } { 20: b@[0..4) lshl: 4 }                      
+  // { 19: b@[1..5) lshl: 0 } { 20: b@[0..4) lshl: 4 }
   // { 20: b@[4..5) lshl: 0 } { 21: b@[0..5) lshl: 1 } { 22: b@[0..2) lshl: 6 }
-  // { 22: b@[2..5) lshl: 0 } { 23: b@[0..5) lshl: 3 }                      
-  // { 24: b@[0..5) lshl: 0 } { 25: b@[0..3) lshl: 5 }                      
+  // { 22: b@[2..5) lshl: 0 } { 23: b@[0..5) lshl: 3 }
+  // { 24: b@[0..5) lshl: 0 } { 25: b@[0..3) lshl: 5 }
   // { 25: b@[3..5) lshl: 0 } { 26: b@[0..5) lshl: 2 } { 27: b@[0..1) lshl: 7 }
-  // { 27: b@[1..5) lshl: 0 } { 28: b@[0..4) lshl: 4 }                      
+  // { 27: b@[1..5) lshl: 0 } { 28: b@[0..4) lshl: 4 }
   // { 28: b@[4..5) lshl: 0 } { 29: b@[0..5) lshl: 1 } { 30: b@[0..2) lshl: 6 }
-  // { 30: b@[2..5) lshl: 0 } { 31: b@[0..5) lshl: 3 }  
+  // { 30: b@[2..5) lshl: 0 } { 31: b@[0..5) lshl: 3 }
   /// This results in 3 shuffles + 1 shr + 2 shl + 3 and + 2 or.
   /// The third vector is empty for positions 0, 2, 4, 5, 7, 9, 10, 12, 14, 15,
   /// 17 and 19 (i.e. there are only 2 entries in that row).
-  /// 
+  ///
   ///                             0: b@[0..5), 1: b@[3..5), etc
   // CHECK-DAG: %[[MASK0:.*]] = arith.constant dense<[31, 24, 30, 16, 28, 31, 24, 30, 16, 28, 31, 24, 30, 16, 28, 31, 24, 30, 16, 28]> : vector<20xi64>
   ///                             1: b@[0..3), 2: b@[0..5), etc
@@ -70,8 +70,8 @@ func.func @f2(%a: vector<16xi16>) -> vector<3xi16> {
   /// Rewriting this standalone pattern is about 1.8x faster on skylake-avx512 according to llvm-mca.
   /// Benefit further increases when mixed with other compute ops.
   ///
-  // { 0: b@[0..3) lshl: 0 } { 1: b@[0..3) lshl: 3 } { 2: b@[0..3) lshl: 6 } { 3: b@[0..3) lshl: 9 } { 4: b@[0..3) lshl: 12 } { 5: b@[0..1) lshl: 15 } 
-  // { 5: b@[1..3) lshl: 0 } { 6: b@[0..3) lshl: 2 } { 7: b@[0..3) lshl: 5 } { 8: b@[0..3) lshl: 8 } { 9: b@[0..3) lshl: 11 } { 10: b@[0..2) lshl: 14 } 
+  // { 0: b@[0..3) lshl: 0 } { 1: b@[0..3) lshl: 3 } { 2: b@[0..3) lshl: 6 } { 3: b@[0..3) lshl: 9 } { 4: b@[0..3) lshl: 12 } { 5: b@[0..1) lshl: 15 }
+  // { 5: b@[1..3) lshl: 0 } { 6: b@[0..3) lshl: 2 } { 7: b@[0..3) lshl: 5 } { 8: b@[0..3) lshl: 8 } { 9: b@[0..3) lshl: 11 } { 10: b@[0..2) lshl: 14 }
   // { 10: b@[2..3) lshl: 0 } { 11: b@[0..3) lshl: 1 } { 12: b@[0..3) lshl: 4 } { 13: b@[0..3) lshl: 7 } { 14: b@[0..3) lshl: 10 } { 15: b@[0..3) lshl: 13 }
   ///                                             0: b@[0..3), 5: b@[1..3), 10: b@[2..3)
   // CHECK-DAG: %[[MASK0:.*]] = arith.constant dense<[7, 6, 4]> : vector<3xi16>
@@ -193,12 +193,14 @@ func.func @f3ext(%a: vector<5xi8>) -> vector<8xi17> {
   return %1 : vector<8xi17>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op
-      : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+        : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.rewrite_narrow_types
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.rewrite_narrow_types
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-scalable-create-mask-lowering.mlir b/mlir/test/Dialect/Vector/vector-scalable-create-mask-lowering.mlir
index 138e647c751ab8f..95c77a43155e7e0 100644
--- a/mlir/test/Dialect/Vector/vector-scalable-create-mask-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-scalable-create-mask-lowering.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter | FileCheck %s
 
 // CHECK-LABEL: func.func @create_mask_2d_trailing_scalable(
 // CHECK-SAME: %[[arg:.*]]: index) -> vector<3x[4]xi1> {
@@ -29,12 +29,14 @@ func.func @cannot_create_mask_2d_leading_scalable(%a: index) -> vector<[4]x4xi1>
   return %mask : vector<[4]x4xi1>
 }
 
-transform.sequence failures(suppress) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_create_mask
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_create_mask
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir
index 3312c819e8cd9b4..fde6ce910244647 100644
--- a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir
+++ b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter | FileCheck %s
 
 /// This tests that shape casts of scalable vectors (with one trailing scalable dim)
 /// can be correctly lowered to vector.scalable.insert/extract.
@@ -203,12 +203,14 @@ func.func @cannot_shape_cast_more_than_one_scalable_dim(%arg0: vector<[4]x[4]xf3
   return %res: vector<2x[2]x[4]xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_shape_cast
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_shape_cast
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir
index 7e59cd3c41e74f0..f2f1211fd70eed4 100644
--- a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @nop_shape_cast
 // CHECK-SAME: %[[A:.*]]: vector<16xf32>
@@ -149,12 +149,14 @@ func.func @shape_cast_1d0d(%arg0 : vector<1xf32>) -> vector<f32> {
   return %s : vector<f32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op
-    : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.lower_shape_cast
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.lower_shape_cast
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/unittests/Dialect/Transform/Preload.cpp b/mlir/unittests/Dialect/Transform/Preload.cpp
index d3c3044e0e0f776..7d66de0fe48ef80 100644
--- a/mlir/unittests/Dialect/Transform/Preload.cpp
+++ b/mlir/unittests/Dialect/Transform/Preload.cpp
@@ -74,19 +74,21 @@ TEST(Preload, ContextPreloadConstructedLibrary) {
   EXPECT_TRUE(retrievedTransformLibrary)
       << "failed to retrieve transform module";
 
-  transform::TransformOpInterface entryPoint =
-      transform::detail::findTransformEntryPoint(inputModule->getOperation(),
-                                                 retrievedTransformLibrary);
-  EXPECT_TRUE(entryPoint) << "failed to find entry point";
-
   OwningOpRef<Operation *> clonedTransformModule(
       retrievedTransformLibrary->clone());
+
   LogicalResult res = transform::detail::mergeSymbolsInto(
       inputModule->getOperation(), std::move(clonedTransformModule));
   EXPECT_TRUE(succeeded(res)) << "failed to define declared symbols";
 
+  transform::TransformOpInterface entryPoint =
+      transform::detail::findTransformEntryPoint(inputModule->getOperation(),
+                                                 retrievedTransformLibrary);
+  EXPECT_TRUE(entryPoint) << "failed to find entry point";
+
   transform::TransformOptions options;
   res = transform::applyTransformNamedSequence(
-      inputModule->getOperation(), retrievedTransformLibrary, options);
+      inputModule->getOperation(), entryPoint, retrievedTransformLibrary,
+      options);
   EXPECT_TRUE(succeeded(res)) << "failed to apply named sequence";
 }

From 9dad2b1cdaac163f904386b61f863dd4a17702e1 Mon Sep 17 00:00:00 2001
From: Alex Zinenko <zinenko@google.com>
Date: Tue, 24 Oct 2023 13:25:38 +0000
Subject: [PATCH 2/2] Also update integration tests

---
 ...ctor-transfer-drop-unit-dims-patterns.mlir |  14 +-
 ...fer-full-partial-split-copy-transform.mlir |  38 ++---
 .../vector-transfer-full-partial-split.mlir   |  62 ++++----
 .../vector-transfer-permutation-lowering.mlir |  20 +--
 .../vector-transfer-to-vector-load-store.mlir |  54 +++----
 .../Vector/vector-transpose-lowering.mlir     |  74 ++++++----
 .../Dialect/Linalg/CPU/ArmSME/fill-2d.mlir    |  12 +-
 .../Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir    |  12 +-
 .../Dialect/Linalg/CPU/test-conv-1d-call.mlir |   8 +-
 .../Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir |   8 +-
 .../Dialect/Linalg/CPU/test-conv-2d-call.mlir |   8 +-
 .../CPU/test-conv-2d-nhwc-hwcf-call.mlir      |   8 +-
 .../Dialect/Linalg/CPU/test-conv-3d-call.mlir |   8 +-
 .../CPU/test-conv-3d-ndhwc-dhwcf-call.mlir    |   8 +-
 .../Linalg/CPU/test-matmul-masked-vec.mlir    |  20 +--
 .../Linalg/CPU/test-tensor-matmul.mlir        |   8 +-
 .../Dialect/Transform/match_batch_matmul.mlir |   8 +-
 .../Dialect/Transform/match_matmul.mlir       |   8 +-
 .../Dialect/Transform/match_reduction.mlir    |  24 ++--
 .../Vector/CPU/test-rewrite-narrow-types.mlir |  22 +--
 .../Dialect/Vector/CPU/test-shuffle16x16.mlir |  14 +-
 ...ansform-mma-sync-matmul-f16-f16-accum.mlir | 136 +++++++++---------
 .../sm80/transform-mma-sync-matmul-f32.mlir   |  20 +--
 ...a_load_64x8_8x128_noswizzle-transform.mlir |  22 +--
 24 files changed, 342 insertions(+), 274 deletions(-)
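
The integration tests listed above receive the same rewrite as the unit
tests in the first patch. For orientation, here is a condensed,
self-contained example of the post-patch form, assembled from the
vector-multi-reduction-lowering.mlir hunks earlier in this patch (the
FileCheck CHECK lines are elided for brevity):

  // RUN: mlir-opt %s --transform-interpreter | FileCheck %s

  func.func @vector_multi_reduction(%arg0: vector<2x4xf32>, %acc: vector<2xf32>) -> vector<2xf32> {
    %0 = vector.multi_reduction <mul>, %arg0, %acc [1] : vector<2x4xf32> to vector<2xf32>
    return %0 : vector<2xf32>
  }

  module attributes {transform.with_named_sequence} {
    transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
      transform.apply_patterns to %func_op {
        transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerreduction"
      } : !transform.op<"func.func">
      transform.yield
    }
  }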

diff --git a/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir b/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir
index ed7d506722a31ed..2852e301888cca8 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter | FileCheck %s
 
 func.func @transfer_read_rank_reducing(
       %arg : memref<1x1x3x2xi8, strided<[6, 6, 2, 1], offset: ?>>) -> vector<3x2xi8> {
@@ -82,9 +82,11 @@ func.func @transfer_write_and_vector_rank_reducing_to_0d(
 //       CHECK:   %[[SHCAST:.+]] = vector.shape_cast %[[VECTOR]] : vector<1x1x1xf32> to vector<f32>
 //       CHECK:   vector.transfer_write %[[SHCAST]], %[[SUBVIEW]]{{.*}} : vector<f32>, memref<f32>
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.rank_reducing_subview_patterns
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.rank_reducing_subview_patterns
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split-copy-transform.mlir b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split-copy-transform.mlir
index a5e80c32599766e..bcb8e1a10c8493a 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split-copy-transform.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split-copy-transform.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-DAG: #[[$map_p4:.*]] = affine_map<()[s0] -> (s0 + 4)>
 // CHECK-DAG: #[[$map_p8:.*]] = affine_map<()[s0] -> (s0 + 8)>
@@ -106,11 +106,13 @@ func.func @split_vector_transfer_read_strided_2d(
   return %1 : vector<4x8xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -167,11 +169,13 @@ func.func @split_vector_transfer_write_2d(%V: vector<4x8xf32>, %A: memref<?x8xf3
 // CHECK:           return
 // CHECK:         }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -235,9 +239,11 @@ func.func @split_vector_transfer_write_strided_2d(
 // CHECK:           return
 // CHECK:         }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "linalg-copy"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
index 956bbd47ebbf1a4..644de885bbaa9d5 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 
 // CHECK-DAG: #[[$map_p4:.*]] = affine_map<()[s0] -> (s0 + 4)>
@@ -132,11 +132,13 @@ func.func @split_vector_transfer_read_mem_space(%A: memref<?x8xf32, 3>, %i: inde
   return %1: vector<4x8xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -190,11 +192,13 @@ func.func @split_vector_transfer_write_2d(%V: vector<4x8xf32>, %A: memref<?x8xf3
 // CHECK:         }
 
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -252,11 +256,13 @@ func.func @split_vector_transfer_write_strided_2d(
 // CHECK:           return
 // CHECK:         }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -285,11 +291,13 @@ func.func @split_vector_transfer_write_mem_space(%V: vector<4x8xf32>, %A: memref
 // CHECK-SAME:           {in_bounds = [true, true]} : vector<4x8xf32>, memref<?x8xf32, strided<[8, 1]>>
 
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 
@@ -328,9 +336,11 @@ func.func @transfer_read_within_scf_for(%A : memref<?x?xf32>, %lb : index, %ub :
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.split_transfer_full_partial split_transfer_strategy = "vector-transfer"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
index 0b738cbfcd7cadb..13e07f59a72a775 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @lower_permutation_with_mask_fixed_width(
 //       CHECK:   %[[vec:.*]] = arith.constant dense<-2.000000e+00> : vector<7x1xf32>
@@ -35,17 +35,19 @@ func.func @permutation_with_mask_scalable(%2: memref<?x?xf32>, %dim_1: index, %d
   %cst_0 = arith.constant 0.000000e+00 : f32
 
   %mask = vector.create_mask %dim_2, %dim_1 : vector<2x[4]xi1>
-  %1 = vector.transfer_read %2[%c0, %c0], %cst_0, %mask 
+  %1 = vector.transfer_read %2[%c0, %c0], %cst_0, %mask
     {in_bounds = [true, true, true], permutation_map = affine_map<(d0, d1) -> (0, d1, d0)>}
     : memref<?x?xf32>, vector<8x[4]x2xf32>
   return %1 : vector<8x[4]x2xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op
-    : (!transform.any_op) -> !transform.any_op
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.transfer_permutation_patterns
-  } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+      : (!transform.any_op) -> !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.transfer_permutation_patterns
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir b/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir
index e0df01b6735445e..7aaaff70e5242e3 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter -canonicalize --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter -canonicalize --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @vector_transfer_ops_0d_memref(
 //  CHECK-SAME:   %[[MEM:.*]]: memref<f32>
@@ -182,8 +182,8 @@ func.func @transfer_perm_map(%mem : memref<8x8xf32>, %i : index) -> vector<4xf32
 #broadcast_1d = affine_map<(d0, d1) -> (0)>
 func.func @transfer_broadcasting(%mem : memref<8x8xf32>, %i : index) -> vector<4xf32> {
   %cf0 = arith.constant 0.0 : f32
-  %res = vector.transfer_read %mem[%i, %i], %cf0 
-    {in_bounds = [true], permutation_map = #broadcast_1d} 
+  %res = vector.transfer_read %mem[%i, %i], %cf0
+    {in_bounds = [true], permutation_map = #broadcast_1d}
       : memref<8x8xf32>, vector<4xf32>
   return %res : vector<4xf32>
 }
@@ -213,8 +213,8 @@ func.func @transfer_scalar(%mem : memref<?x?xf32>, %i : index) -> vector<1xf32>
 #broadcast_2d = affine_map<(d0, d1) -> (0, 0)>
 func.func @transfer_broadcasting_2D(%mem : memref<8x8xf32>, %i : index) -> vector<4x4xf32> {
   %cf0 = arith.constant 0.0 : f32
-  %res = vector.transfer_read %mem[%i, %i], %cf0 
-    {in_bounds = [true, true], permutation_map = #broadcast_2d} 
+  %res = vector.transfer_read %mem[%i, %i], %cf0
+    {in_bounds = [true, true], permutation_map = #broadcast_2d}
       : memref<8x8xf32>, vector<4x4xf32>
   return %res : vector<4x4xf32>
 }
@@ -231,19 +231,21 @@ func.func @transfer_broadcasting_2D(%mem : memref<8x8xf32>, %i : index) -> vecto
 #broadcast_2d_in_4d = affine_map<(d0, d1, d2, d3, d4) -> (d1, 0, 0, d4)>
 func.func @transfer_broadcasting_complex(%mem : memref<10x20x30x8x8xf32>, %i : index) -> vector<3x2x4x5xf32> {
   %cf0 = arith.constant 0.0 : f32
-  %res = vector.transfer_read %mem[%i, %i, %i, %i, %i], %cf0 
-    {in_bounds = [true, true, true, true], permutation_map = #broadcast_2d_in_4d} 
+  %res = vector.transfer_read %mem[%i, %i, %i, %i, %i], %cf0
+    {in_bounds = [true, true, true, true], permutation_map = #broadcast_2d_in_4d}
       : memref<10x20x30x8x8xf32>, vector<3x2x4x5xf32>
   return %res : vector<3x2x4x5xf32>
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transfer max_transfer_rank = 99
-    transform.apply_patterns.vector.transfer_permutation_patterns
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transfer max_transfer_rank = 99
+      transform.apply_patterns.vector.transfer_permutation_patterns
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -360,12 +362,14 @@ func.func @transfer_write_broadcast_unit_dim(
   return %0 : tensor<?x?x?x?xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transfer max_transfer_rank = 99
-    transform.apply_patterns.vector.transfer_permutation_patterns
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transfer max_transfer_rank = 99
+      transform.apply_patterns.vector.transfer_permutation_patterns
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -386,9 +390,11 @@ func.func @transfer_2D_masked(%mem : memref<?x?xf32>, %mask : vector<2x4xi1>) ->
   return %res : vector<2x4xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transfer max_transfer_rank = 2
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transfer max_transfer_rank = 2
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir b/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir
index 670bb87186ddfb7..22d9224838c49c4 100644
--- a/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @transpose23
 // CHECK-SAME: %[[A:.*]]: vector<2x3xf32>
@@ -74,11 +74,13 @@ func.func @transpose1023_1x1x8x8xf32(%arg0: vector<1x1x8x8xf32>) -> vector<1x1x8
   return %0 : vector<1x1x8x8xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transpose lowering_strategy = "eltwise"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transpose lowering_strategy = "eltwise"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -97,11 +99,13 @@ func.func @transpose(%arg0: vector<2x4xf32>) -> vector<4x2xf32> {
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_1d"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_1d"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -116,11 +120,13 @@ func.func @transpose(%arg0: vector<2x4xf32>) -> vector<4x2xf32> {
 }
 
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transpose lowering_strategy = "flat_transpose"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transpose lowering_strategy = "flat_transpose"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -603,11 +609,13 @@ func.func @transpose210_1x8x8xf32(%arg0: vector<1x8x8xf32>) -> vector<8x8x1xf32>
   return %0 : vector<8x8x1xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transpose avx2_lowering_strategy = true
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transpose avx2_lowering_strategy = true
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -681,11 +689,13 @@ func.func @transpose_shuffle16x16xf32(%arg0: vector<16x16xf32>) -> vector<16x16x
   return %0 : vector<16x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_16x16"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_16x16"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
 // -----
@@ -760,9 +770,11 @@ func.func @transpose021_shuffle16x16xf32(%arg0: vector<1x16x16xf32>) -> vector<1
   return %0 : vector<1x16x16xf32>
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_16x16"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_16x16"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
index 08f14dfae3249f2..fbb29ee017efca7 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s \
-// RUN:   -test-transform-dialect-interpreter \
+// RUN:   -transform-interpreter \
 // RUN:   -test-transform-dialect-erase-schedule \
 // RUN:   -lower-vector-mask \
 // RUN:   -one-shot-bufferize="bufferize-function-boundaries" \
@@ -109,10 +109,12 @@ func.func @entry() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [[4], [4]] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [[4], [4]] : !transform.any_op
+    transform.yield
+  }
 }
 
 llvm.func @printCString(!llvm.ptr<i8>)
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir
index c3f49b2f39cf137..1d07e3e90047def 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -lower-vector-mask -one-shot-bufferize -test-lower-to-llvm | \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -lower-vector-mask -one-shot-bufferize -test-lower-to-llvm | \
 // RUN: %mcr_aarch64_cmd -e=entry -entry-point-result=void --march=aarch64 --mattr="+sve" -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils | \
 // RUN: FileCheck %s
 
@@ -46,10 +46,12 @@ func.func @entry() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %0 vector_sizes [[4]] : !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [[4]] : !transform.any_op
+    transform.yield
+  }
 }
 
 llvm.func @printCString(!llvm.ptr<i8>)
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir
index b3f2b441f669649..443963fb8c590d8 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-call.mlir
@@ -3,7 +3,7 @@
 // RUN:   -shared-libs=%mlir_runner_utils \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
 // RUN:    -expand-strided-metadata -lower-affine -convert-arith-to-llvm -convert-scf-to-cf --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \
@@ -24,10 +24,12 @@ func.func @conv_1d(%arg0: memref<?xf32>, %arg1: memref<?xf32>, %arg2: memref<?xf
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.conv_1d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loop = transform.structured.tile_using_for %0 [4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func @main() {
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir
index 5c4bde826f913f3..f652d707de05eff 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-1d-nwc-wcf-call.mlir
@@ -3,7 +3,7 @@
 // RUN:   -shared-libs=%mlir_runner_utils \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
 // RUN:    -expand-strided-metadata -lower-affine -convert-arith-to-llvm -convert-scf-to-cf --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \
@@ -26,10 +26,12 @@ func.func @conv_1d_nwc_wcf(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>, %
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.conv_1d_nwc_wcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:2 = transform.structured.tile_using_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func @main() {
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir
index b9584527c6e587f..2eaba8233d69e2c 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-call.mlir
@@ -3,7 +3,7 @@
 // RUN:   -shared-libs=%mlir_runner_utils \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
 // RUN:    -expand-strided-metadata -lower-affine -convert-arith-to-llvm -convert-scf-to-cf --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \
@@ -24,10 +24,12 @@ func.func @conv_2d(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.conv_2d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:2 = transform.structured.tile_using_for %0 [2, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func @main() {
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir
index a32f3f55848e32c..eac8d8a6ea437a9 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-2d-nhwc-hwcf-call.mlir
@@ -3,7 +3,7 @@
 // RUN:   -shared-libs=%mlir_runner_utils \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
 // RUN:    -expand-strided-metadata -lower-affine -convert-arith-to-llvm -convert-scf-to-cf --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \
@@ -26,10 +26,12 @@ func.func @conv_2d_nhwc_hwcf(%arg0: memref<?x?x?x?xf32>, %arg1: memref<?x?x?x?xf
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:4 = transform.structured.tile_using_for %0 [2, 3, 3, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func @main() {
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir
index 469481d6697bd06..d5584cd67702c0e 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-call.mlir
@@ -3,7 +3,7 @@
 // RUN:   -shared-libs=%mlir_runner_utils \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
 // RUN:    -expand-strided-metadata -lower-affine -convert-arith-to-llvm -convert-scf-to-cf --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \
@@ -24,10 +24,12 @@ func.func @conv_3d(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>, %arg2: me
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.conv_3d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:3 = transform.structured.tile_using_for %0 [2, 2, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func @main() {
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir
index 75599442ee47037..7dca79334565625 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-conv-3d-ndhwc-dhwcf-call.mlir
@@ -3,7 +3,7 @@
 // RUN:   -shared-libs=%mlir_runner_utils \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -convert-linalg-to-loops -convert-scf-to-cf \
 // RUN:    -expand-strided-metadata -lower-affine -convert-arith-to-llvm -convert-scf-to-cf --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_runner_utils \
@@ -26,10 +26,12 @@ func.func @conv_3d_ndhwc_dhwcf(%arg0: memref<?x?x?x?x?xf32>, %arg1: memref<?x?x?
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.conv_3d_ndhwc_dhwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:3 = transform.structured.tile_using_for %0 [0, 5, 5, 5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func @main() {
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-matmul-masked-vec.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-matmul-masked-vec.mlir
index 64954098aa03c56..0378d638df61abe 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-matmul-masked-vec.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-matmul-masked-vec.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -one-shot-bufferize -func-bufferize -lower-vector-mask --test-lower-to-llvm | \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -one-shot-bufferize -func-bufferize -lower-vector-mask --test-lower-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void --shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \
 // RUN: FileCheck %s
 
@@ -47,12 +47,14 @@ func.func @main() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %func_op = get_parent_op %0 : (!transform.any_op) -> !transform.op<"func.func">
-  transform.structured.vectorize %0 vector_sizes [4, 4, 2] : !transform.any_op
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerreduction"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %func_op = transform.get_parent_op %0 : (!transform.any_op) -> !transform.op<"func.func">
+    transform.structured.vectorize %0 vector_sizes [4, 4, 2] : !transform.any_op
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerreduction"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
index f637884acad1d01..fda7ffb0c753c89 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
@@ -6,7 +6,7 @@
 // RUN:   -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -linalg-bufferize \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -linalg-bufferize \
 // RUN: -scf-bufferize -arith-bufferize -tensor-bufferize \
 // RUN: -func-bufferize \
 // RUN: -finalizing-bufferize -convert-linalg-to-loops -convert-scf-to-cf -convert-scf-to-cf \
@@ -36,10 +36,12 @@ func.func @main() {
   return
 }
 
-transform.sequence failures(propagate) {
-  ^bb0(%arg1: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
     %1, %loops:3 = transform.structured.tile_using_for %0 [1, 2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
 }
 
 func.func private @printMemrefF32(%ptr : tensor<*xf32>)
diff --git a/mlir/test/Integration/Dialect/Transform/match_batch_matmul.mlir b/mlir/test/Integration/Dialect/Transform/match_batch_matmul.mlir
index 3914dc6198a69c2..0c3b212e2ade9c4 100644
--- a/mlir/test/Integration/Dialect/Transform/match_batch_matmul.mlir
+++ b/mlir/test/Integration/Dialect/Transform/match_batch_matmul.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter='transform-library-paths=%p/match_matmul_common.mlir' --verify-diagnostics
+// RUN: mlir-opt %s --transform-preload-library='transform-library-paths=%p/match_matmul_common.mlir' --transform-interpreter --verify-diagnostics
 
 module attributes { transform.with_named_sequence } {
   transform.named_sequence @_match_matmul_like(
@@ -42,11 +42,11 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) {
-  ^bb(%root: !transform.any_op):
-    foreach_match in %root
+  transform.named_sequence @__transform_main(%root: !transform.any_op {transform.consumed}) {
+    transform.foreach_match in %root
       @match_bmm -> @print_bmm
       : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 }
 
diff --git a/mlir/test/Integration/Dialect/Transform/match_matmul.mlir b/mlir/test/Integration/Dialect/Transform/match_matmul.mlir
index bb0f1125fd39716..6fb105a3297b377 100644
--- a/mlir/test/Integration/Dialect/Transform/match_matmul.mlir
+++ b/mlir/test/Integration/Dialect/Transform/match_matmul.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter='transform-library-paths=%p/match_matmul_common.mlir' --verify-diagnostics
+// RUN: mlir-opt %s --transform-preload-library='transform-library-paths=%p/match_matmul_common.mlir' --transform-interpreter --verify-diagnostics
 
 module attributes { transform.with_named_sequence } {
   transform.named_sequence @_match_matmul_like(
@@ -40,11 +40,11 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) {
-  ^bb(%root: !transform.any_op):
-    foreach_match in %root
+  transform.named_sequence @__transform_main(%root: !transform.any_op {transform.consumed}) {
+    transform.foreach_match in %root
       @match_matmul -> @print_matmul
       : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 }
 
diff --git a/mlir/test/Integration/Dialect/Transform/match_reduction.mlir b/mlir/test/Integration/Dialect/Transform/match_reduction.mlir
index c85547af4ef1dd9..28c87998900dd5d 100644
--- a/mlir/test/Integration/Dialect/Transform/match_reduction.mlir
+++ b/mlir/test/Integration/Dialect/Transform/match_reduction.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter --verify-diagnostics
+// RUN: mlir-opt %s --transform-interpreter --verify-diagnostics
 
 module attributes { transform.with_named_sequence } {
   transform.named_sequence @_reduce_leading_trailing(%entry: !transform.any_op {transform.readonly})
@@ -23,14 +23,14 @@ module attributes { transform.with_named_sequence } {
     %c2 = transform.param.constant 2 : i64 -> !transform.param<i64>
     %c4 = transform.param.constant 4 : i64 -> !transform.param<i64>
 
-    %rk, %dms, %bw, %operand_o, %init_v, %trailing_o = transform.match.structured failures(propagate) %entry 
+    %rk, %dms, %bw, %operand_o, %init_v, %trailing_o = transform.match.structured failures(propagate) %entry
         : (!transform.any_op) -> (!transform.param<i64>, !transform.param<i64>, !transform.param<i64>,
                                   !transform.any_op, !transform.any_value, !transform.any_op) {
     ^bb0(%struct: !transform.any_op):
       %rank = transform.match.structured.rank %struct : (!transform.any_op) -> !transform.param<i64>
       transform.match.param.cmpi ge %rank, %c2 : !transform.param<i64>
       transform.match.param.cmpi le %rank, %c4 : !transform.param<i64>
-      
+
       transform.match.structured.dim %struct[-1] {reduction} : !transform.any_op
       transform.match.structured.dim %struct[except(-1)] {parallel} : !transform.any_op
       %dims = transform.match.structured.dim %struct[all] : (!transform.any_op) -> !transform.param<i64>
@@ -43,7 +43,7 @@ module attributes { transform.with_named_sequence } {
       transform.match.structured.input %struct[0] {projected_permutation} : !transform.any_op
       transform.match.structured.init %struct[0] {projected_permutation} : !transform.any_op
       %init = transform.match.structured.init %struct[0] : (!transform.any_op) -> !transform.any_value
-      
+
       // This dance is necessary to create an empty handle, without failing the
       // entire match, when there is no single user.
       %trailing_optional = transform.sequence %struct : (!transform.any_op) -> !transform.any_op failures(suppress) {
@@ -89,7 +89,7 @@ module attributes { transform.with_named_sequence } {
     }
 
     %init_o = transform.get_defining_op %init_v : (!transform.any_value) -> !transform.any_op
-    transform.match.operation_name %init_o ["linalg.fill"] : !transform.any_op    
+    transform.match.operation_name %init_o ["linalg.fill"] : !transform.any_op
 
     transform.yield %operand_o, %init_o, %entry, %trailing_o, %rk, %dms, %bw
         : !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op,
@@ -114,11 +114,11 @@ module attributes { transform.with_named_sequence } {
     transform.yield
   }
 
-  transform.sequence failures(propagate) {
-  ^bb(%root: !transform.any_op):
-    foreach_match in %root
+  transform.named_sequence @__transform_main(%root: !transform.any_op {transform.consumed}) {
+    transform.foreach_match in %root
       @fill_reduce_leading_trailing -> @print_reduce_leading_trailing
       : (!transform.any_op) -> !transform.any_op
+    transform.yield
   }
 }
 
@@ -187,7 +187,7 @@ func.func @reduce_eltwise(%arg : !in_tensor_t) -> (!out_tensor_t) {
     indexing_maps = [affine_map<(d0) -> (d0)>,
                      affine_map<(d0) -> (d0)>],
     iterator_types = ["parallel"]}
-    ins(%5 : !out_tensor_t) outs(%6 : !out_tensor_t) {  
+    ins(%5 : !out_tensor_t) outs(%6 : !out_tensor_t) {
     ^bb0(%arg3: f32, %arg4: f32):
       %4 = math.sqrt %arg3 : f32
       linalg.yield %4 : f32
@@ -234,7 +234,7 @@ func.func @eltwise_reduce_eltwise(%arg : !in_tensor_t) -> (!out_tensor_t) {
     indexing_maps = [affine_map<(d0) -> (d0)>,
                      affine_map<(d0) -> (d0)>],
     iterator_types = ["parallel"]}
-    ins(%6 : !out_tensor_t) outs(%7 : !out_tensor_t) {  
+    ins(%6 : !out_tensor_t) outs(%7 : !out_tensor_t) {
     ^bb0(%arg3: f32, %arg4: f32):
       %4 = math.sqrt %arg3 : f32
       linalg.yield %4 : f32
@@ -283,7 +283,7 @@ func.func @eltwise_reduce_eltwise_swapped(%arg : !in_tensor_t) -> (!out_tensor_t
     indexing_maps = [affine_map<(d0) -> (d0)>,
                      affine_map<(d0) -> (d0)>],
     iterator_types = ["parallel"]}
-    ins(%6 : !out_tensor_t) outs(%7 : !out_tensor_t) {  
+    ins(%6 : !out_tensor_t) outs(%7 : !out_tensor_t) {
     ^bb0(%arg3: f32, %arg4: f32):
       %4 = math.sqrt %arg3 : f32
       linalg.yield %4 : f32
@@ -305,7 +305,7 @@ func.func @reduction_with_extra_op_in_func(%arg0: tensor<8x479xf32>, %arg1: tens
   %result = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                      affine_map<(d0, d1) -> (d0)>],
-    iterator_types = ["parallel", "reduction"]} 
+    iterator_types = ["parallel", "reduction"]}
     ins(%arg0 : tensor<8x479xf32>)
     outs(%fill : tensor<8xf32>) {
   ^bb0(%in: f32, %out: f32):
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-rewrite-narrow-types.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-rewrite-narrow-types.mlir
index 7d15e2e2e3ef5e4..6bdeb4523865fa9 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/test-rewrite-narrow-types.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/test-rewrite-narrow-types.mlir
@@ -5,7 +5,7 @@
 // RUN: FileCheck %s
 
 /// Run once with the pattern and compare.
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule -test-lower-to-llvm | \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -test-lower-to-llvm | \
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void  \
 // RUN:   -shared-libs=%mlir_c_runner_utils | \
 // RUN: FileCheck %s
@@ -25,7 +25,7 @@ func.func @print_as_i1_10xi8(%v : vector<10xi8>) {
 func.func @f(%v: vector<16xi16>) {
   %trunc = arith.trunci %v : vector<16xi16> to vector<16xi5>
   func.call @print_as_i1_16xi5(%trunc) : (vector<16xi5>) -> ()
-  //      CHECK: ( 
+  //      CHECK: (
   // CHECK-SAME: 1, 1, 1, 1, 1,
   // CHECK-SAME: 0, 1, 1, 1, 1,
   // CHECK-SAME: 1, 0, 1, 1, 1,
@@ -45,7 +45,7 @@ func.func @f(%v: vector<16xi16>) {
 
   %bitcast = vector.bitcast %trunc : vector<16xi5> to vector<10xi8>
   func.call @print_as_i1_10xi8(%bitcast) : (vector<10xi8>) -> ()
-  //      CHECK: ( 
+  //      CHECK: (
   // CHECK-SAME: 1, 1, 1, 1, 1, 0, 1, 1,
   // CHECK-SAME: 1, 1, 1, 0, 1, 1, 1, 0,
   // CHECK-SAME: 0, 1, 1, 1, 1, 1, 0, 1,
@@ -190,12 +190,14 @@ func.func @entry() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
-  %f = transform.structured.match ops{["func.func"]} in %module_op
-      : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
+    %f = transform.structured.match ops{["func.func"]} in %module_op
+        : (!transform.any_op) -> !transform.any_op
 
-  transform.apply_patterns to %f {
-    transform.apply_patterns.vector.rewrite_narrow_types
-  } : !transform.any_op
+    transform.apply_patterns to %f {
+      transform.apply_patterns.vector.rewrite_narrow_types
+    } : !transform.any_op
+    transform.yield
+  }
 }
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-shuffle16x16.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-shuffle16x16.mlir
index 5c21a69dd199183..396417bd9b4485e 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/test-shuffle16x16.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/test-shuffle16x16.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s -convert-vector-to-scf -convert-scf-to-cf \
-// RUN:   -test-transform-dialect-interpreter \
+// RUN:   -transform-interpreter \
 // RUN:   -test-transform-dialect-erase-schedule \
 // RUN:   -convert-vector-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void  \
@@ -29,10 +29,12 @@ func.func @entry() {
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%func_op: !transform.op<"func.func">):
-  transform.apply_patterns to %func_op {
-    transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_16x16"
-  } : !transform.op<"func.func">
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
+    transform.apply_patterns to %func_op {
+      transform.apply_patterns.vector.lower_transpose lowering_strategy = "shuffle_16x16"
+    } : !transform.op<"func.func">
+    transform.yield
+  }
 }
 
diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
index 4aa8ce326cc892e..26bf448a97f8123 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s \
-// RUN:  -test-transform-dialect-interpreter \
+// RUN:  -transform-interpreter \
 // RUN:  -test-transform-dialect-erase-schedule \
 // RUN:  -test-lower-to-nvvm="kernel-index-bitwidth=32 cubin-chip=sm_80 cubin-features=+ptx76 cubin-format=%gpu_compilation_format" \
 // RUN: | mlir-cpu-runner \
@@ -127,57 +127,57 @@ func.func @main() {
 
   // Print the memrefs before computation.
   call @print_lhs_as_memref_32(%lhs) : (!lhs_memref_type) -> ()
-  // CHECK: [0,   0.015625,   0.03125,   0.046875,   0.0625,   0.078125,   0.09375,   0.109375,   0.125,   0.140625,   0.15625,   0.171875,   0.1875,   0.203125,   0.21875,   0.234375], 
-  // CHECK: [0.25,   0.265625,   0.28125,   0.296875,   0.3125,   0.328125,   0.34375,   0.359375,   0.375,   0.390625,   0.40625,   0.421875,   0.4375,   0.453125,   0.46875,   0.484375], 
-  // CHECK: [0.5,   0.515625,   0.53125,   0.546875,   0.5625,   0.578125,   0.59375,   0.609375,   0.625,   0.640625,   0.65625,   0.671875,   0.6875,   0.703125,   0.71875,   0.734375], 
-  // CHECK: [0.75,   0.765625,   0.78125,   0.796875,   0.8125,   0.828125,   0.84375,   0.859375,   0.875,   0.890625,   0.90625,   0.921875,   0.9375,   0.953125,   0.96875,   0.984375], 
-  // CHECK: [1,   1.01562,   1.03125,   1.04688,   1.0625,   1.07812,   1.09375,   1.10938,   1.125,   1.14062,   1.15625,   1.17188,   1.1875,   1.20312,   1.21875,   1.23438], 
-  // CHECK: [1.25,   1.26562,   1.28125,   1.29688,   1.3125,   1.32812,   1.34375,   1.35938,   1.375,   1.39062,   1.40625,   1.42188,   1.4375,   1.45312,   1.46875,   1.48438], 
-  // CHECK: [1.5,   1.51562,   1.53125,   1.54688,   1.5625,   1.57812,   1.59375,   1.60938,   1.625,   1.64062,   1.65625,   1.67188,   1.6875,   1.70312,   1.71875,   1.73438], 
-  // CHECK: [1.75,   1.76562,   1.78125,   1.79688,   1.8125,   1.82812,   1.84375,   1.85938,   1.875,   1.89062,   1.90625,   1.92188,   1.9375,   1.95312,   1.96875,   1.98438], 
-  // CHECK: [2,   2.01562,   2.03125,   2.04688,   2.0625,   2.07812,   2.09375,   2.10938,   2.125,   2.14062,   2.15625,   2.17188,   2.1875,   2.20312,   2.21875,   2.23438], 
-  // CHECK: [2.25,   2.26562,   2.28125,   2.29688,   2.3125,   2.32812,   2.34375,   2.35938,   2.375,   2.39062,   2.40625,   2.42188,   2.4375,   2.45312,   2.46875,   2.48438], 
-  // CHECK: [2.5,   2.51562,   2.53125,   2.54688,   2.5625,   2.57812,   2.59375,   2.60938,   2.625,   2.64062,   2.65625,   2.67188,   2.6875,   2.70312,   2.71875,   2.73438], 
-  // CHECK: [2.75,   2.76562,   2.78125,   2.79688,   2.8125,   2.82812,   2.84375,   2.85938,   2.875,   2.89062,   2.90625,   2.92188,   2.9375,   2.95312,   2.96875,   2.98438], 
-  // CHECK: [3,   3.01562,   3.03125,   3.04688,   3.0625,   3.07812,   3.09375,   3.10938,   3.125,   3.14062,   3.15625,   3.17188,   3.1875,   3.20312,   3.21875,   3.23438], 
-  // CHECK: [3.25,   3.26562,   3.28125,   3.29688,   3.3125,   3.32812,   3.34375,   3.35938,   3.375,   3.39062,   3.40625,   3.42188,   3.4375,   3.45312,   3.46875,   3.48438], 
-  // CHECK: [3.5,   3.51562,   3.53125,   3.54688,   3.5625,   3.57812,   3.59375,   3.60938,   3.625,   3.64062,   3.65625,   3.67188,   3.6875,   3.70312,   3.71875,   3.73438], 
+  // CHECK: [0,   0.015625,   0.03125,   0.046875,   0.0625,   0.078125,   0.09375,   0.109375,   0.125,   0.140625,   0.15625,   0.171875,   0.1875,   0.203125,   0.21875,   0.234375],
+  // CHECK: [0.25,   0.265625,   0.28125,   0.296875,   0.3125,   0.328125,   0.34375,   0.359375,   0.375,   0.390625,   0.40625,   0.421875,   0.4375,   0.453125,   0.46875,   0.484375],
+  // CHECK: [0.5,   0.515625,   0.53125,   0.546875,   0.5625,   0.578125,   0.59375,   0.609375,   0.625,   0.640625,   0.65625,   0.671875,   0.6875,   0.703125,   0.71875,   0.734375],
+  // CHECK: [0.75,   0.765625,   0.78125,   0.796875,   0.8125,   0.828125,   0.84375,   0.859375,   0.875,   0.890625,   0.90625,   0.921875,   0.9375,   0.953125,   0.96875,   0.984375],
+  // CHECK: [1,   1.01562,   1.03125,   1.04688,   1.0625,   1.07812,   1.09375,   1.10938,   1.125,   1.14062,   1.15625,   1.17188,   1.1875,   1.20312,   1.21875,   1.23438],
+  // CHECK: [1.25,   1.26562,   1.28125,   1.29688,   1.3125,   1.32812,   1.34375,   1.35938,   1.375,   1.39062,   1.40625,   1.42188,   1.4375,   1.45312,   1.46875,   1.48438],
+  // CHECK: [1.5,   1.51562,   1.53125,   1.54688,   1.5625,   1.57812,   1.59375,   1.60938,   1.625,   1.64062,   1.65625,   1.67188,   1.6875,   1.70312,   1.71875,   1.73438],
+  // CHECK: [1.75,   1.76562,   1.78125,   1.79688,   1.8125,   1.82812,   1.84375,   1.85938,   1.875,   1.89062,   1.90625,   1.92188,   1.9375,   1.95312,   1.96875,   1.98438],
+  // CHECK: [2,   2.01562,   2.03125,   2.04688,   2.0625,   2.07812,   2.09375,   2.10938,   2.125,   2.14062,   2.15625,   2.17188,   2.1875,   2.20312,   2.21875,   2.23438],
+  // CHECK: [2.25,   2.26562,   2.28125,   2.29688,   2.3125,   2.32812,   2.34375,   2.35938,   2.375,   2.39062,   2.40625,   2.42188,   2.4375,   2.45312,   2.46875,   2.48438],
+  // CHECK: [2.5,   2.51562,   2.53125,   2.54688,   2.5625,   2.57812,   2.59375,   2.60938,   2.625,   2.64062,   2.65625,   2.67188,   2.6875,   2.70312,   2.71875,   2.73438],
+  // CHECK: [2.75,   2.76562,   2.78125,   2.79688,   2.8125,   2.82812,   2.84375,   2.85938,   2.875,   2.89062,   2.90625,   2.92188,   2.9375,   2.95312,   2.96875,   2.98438],
+  // CHECK: [3,   3.01562,   3.03125,   3.04688,   3.0625,   3.07812,   3.09375,   3.10938,   3.125,   3.14062,   3.15625,   3.17188,   3.1875,   3.20312,   3.21875,   3.23438],
+  // CHECK: [3.25,   3.26562,   3.28125,   3.29688,   3.3125,   3.32812,   3.34375,   3.35938,   3.375,   3.39062,   3.40625,   3.42188,   3.4375,   3.45312,   3.46875,   3.48438],
+  // CHECK: [3.5,   3.51562,   3.53125,   3.54688,   3.5625,   3.57812,   3.59375,   3.60938,   3.625,   3.64062,   3.65625,   3.67188,   3.6875,   3.70312,   3.71875,   3.73438],
   // CHECK: [3.75,   3.76562,   3.78125,   3.79688,   3.8125,   3.82812,   3.84375,   3.85938,   3.875,   3.89062,   3.90625,   3.92188,   3.9375,   3.95312,   3.96875,   3.98438]
 
   call @print_rhs_as_memref_32(%rhs) : (!rhs_memref_type) -> ()
-  // CHECK: [0,   0.015625,   0.03125,   0.046875,   0.0625,   0.078125,   0.09375,   0.109375], 
-  // CHECK: [0.125,   0.140625,   0.15625,   0.171875,   0.1875,   0.203125,   0.21875,   0.234375], 
-  // CHECK: [0.25,   0.265625,   0.28125,   0.296875,   0.3125,   0.328125,   0.34375,   0.359375], 
-  // CHECK: [0.375,   0.390625,   0.40625,   0.421875,   0.4375,   0.453125,   0.46875,   0.484375], 
-  // CHECK: [0.5,   0.515625,   0.53125,   0.546875,   0.5625,   0.578125,   0.59375,   0.609375], 
-  // CHECK: [0.625,   0.640625,   0.65625,   0.671875,   0.6875,   0.703125,   0.71875,   0.734375], 
-  // CHECK: [0.75,   0.765625,   0.78125,   0.796875,   0.8125,   0.828125,   0.84375,   0.859375], 
-  // CHECK: [0.875,   0.890625,   0.90625,   0.921875,   0.9375,   0.953125,   0.96875,   0.984375], 
-  // CHECK: [1,   1.01562,   1.03125,   1.04688,   1.0625,   1.07812,   1.09375,   1.10938], 
-  // CHECK: [1.125,   1.14062,   1.15625,   1.17188,   1.1875,   1.20312,   1.21875,   1.23438], 
-  // CHECK: [1.25,   1.26562,   1.28125,   1.29688,   1.3125,   1.32812,   1.34375,   1.35938], 
-  // CHECK: [1.375,   1.39062,   1.40625,   1.42188,   1.4375,   1.45312,   1.46875,   1.48438], 
-  // CHECK: [1.5,   1.51562,   1.53125,   1.54688,   1.5625,   1.57812,   1.59375,   1.60938], 
-  // CHECK: [1.625,   1.64062,   1.65625,   1.67188,   1.6875,   1.70312,   1.71875,   1.73438], 
-  // CHECK: [1.75,   1.76562,   1.78125,   1.79688,   1.8125,   1.82812,   1.84375,   1.85938], 
+  // CHECK: [0,   0.015625,   0.03125,   0.046875,   0.0625,   0.078125,   0.09375,   0.109375],
+  // CHECK: [0.125,   0.140625,   0.15625,   0.171875,   0.1875,   0.203125,   0.21875,   0.234375],
+  // CHECK: [0.25,   0.265625,   0.28125,   0.296875,   0.3125,   0.328125,   0.34375,   0.359375],
+  // CHECK: [0.375,   0.390625,   0.40625,   0.421875,   0.4375,   0.453125,   0.46875,   0.484375],
+  // CHECK: [0.5,   0.515625,   0.53125,   0.546875,   0.5625,   0.578125,   0.59375,   0.609375],
+  // CHECK: [0.625,   0.640625,   0.65625,   0.671875,   0.6875,   0.703125,   0.71875,   0.734375],
+  // CHECK: [0.75,   0.765625,   0.78125,   0.796875,   0.8125,   0.828125,   0.84375,   0.859375],
+  // CHECK: [0.875,   0.890625,   0.90625,   0.921875,   0.9375,   0.953125,   0.96875,   0.984375],
+  // CHECK: [1,   1.01562,   1.03125,   1.04688,   1.0625,   1.07812,   1.09375,   1.10938],
+  // CHECK: [1.125,   1.14062,   1.15625,   1.17188,   1.1875,   1.20312,   1.21875,   1.23438],
+  // CHECK: [1.25,   1.26562,   1.28125,   1.29688,   1.3125,   1.32812,   1.34375,   1.35938],
+  // CHECK: [1.375,   1.39062,   1.40625,   1.42188,   1.4375,   1.45312,   1.46875,   1.48438],
+  // CHECK: [1.5,   1.51562,   1.53125,   1.54688,   1.5625,   1.57812,   1.59375,   1.60938],
+  // CHECK: [1.625,   1.64062,   1.65625,   1.67188,   1.6875,   1.70312,   1.71875,   1.73438],
+  // CHECK: [1.75,   1.76562,   1.78125,   1.79688,   1.8125,   1.82812,   1.84375,   1.85938],
   // CHECK: [1.875,   1.89062,   1.90625,   1.92188,   1.9375,   1.95312,   1.96875,   1.98438]
 
   call @print_res_as_memref_32(%res) : (!res_memref_type) -> ()
-  // CHECK: [0,   0.015625,   0.03125,   0.046875,   0.0625,   0.078125,   0.09375,   0.109375], 
-  // CHECK: [0.125,   0.140625,   0.15625,   0.171875,   0.1875,   0.203125,   0.21875,   0.234375], 
-  // CHECK: [0.25,   0.265625,   0.28125,   0.296875,   0.3125,   0.328125,   0.34375,   0.359375], 
-  // CHECK: [0.375,   0.390625,   0.40625,   0.421875,   0.4375,   0.453125,   0.46875,   0.484375], 
-  // CHECK: [0.5,   0.515625,   0.53125,   0.546875,   0.5625,   0.578125,   0.59375,   0.609375], 
-  // CHECK: [0.625,   0.640625,   0.65625,   0.671875,   0.6875,   0.703125,   0.71875,   0.734375], 
-  // CHECK: [0.75,   0.765625,   0.78125,   0.796875,   0.8125,   0.828125,   0.84375,   0.859375], 
-  // CHECK: [0.875,   0.890625,   0.90625,   0.921875,   0.9375,   0.953125,   0.96875,   0.984375], 
-  // CHECK: [1,   1.01562,   1.03125,   1.04688,   1.0625,   1.07812,   1.09375,   1.10938], 
-  // CHECK: [1.125,   1.14062,   1.15625,   1.17188,   1.1875,   1.20312,   1.21875,   1.23438], 
-  // CHECK: [1.25,   1.26562,   1.28125,   1.29688,   1.3125,   1.32812,   1.34375,   1.35938], 
-  // CHECK: [1.375,   1.39062,   1.40625,   1.42188,   1.4375,   1.45312,   1.46875,   1.48438], 
-  // CHECK: [1.5,   1.51562,   1.53125,   1.54688,   1.5625,   1.57812,   1.59375,   1.60938], 
-  // CHECK: [1.625,   1.64062,   1.65625,   1.67188,   1.6875,   1.70312,   1.71875,   1.73438], 
-  // CHECK: [1.75,   1.76562,   1.78125,   1.79688,   1.8125,   1.82812,   1.84375,   1.85938], 
+  // CHECK: [0,   0.015625,   0.03125,   0.046875,   0.0625,   0.078125,   0.09375,   0.109375],
+  // CHECK: [0.125,   0.140625,   0.15625,   0.171875,   0.1875,   0.203125,   0.21875,   0.234375],
+  // CHECK: [0.25,   0.265625,   0.28125,   0.296875,   0.3125,   0.328125,   0.34375,   0.359375],
+  // CHECK: [0.375,   0.390625,   0.40625,   0.421875,   0.4375,   0.453125,   0.46875,   0.484375],
+  // CHECK: [0.5,   0.515625,   0.53125,   0.546875,   0.5625,   0.578125,   0.59375,   0.609375],
+  // CHECK: [0.625,   0.640625,   0.65625,   0.671875,   0.6875,   0.703125,   0.71875,   0.734375],
+  // CHECK: [0.75,   0.765625,   0.78125,   0.796875,   0.8125,   0.828125,   0.84375,   0.859375],
+  // CHECK: [0.875,   0.890625,   0.90625,   0.921875,   0.9375,   0.953125,   0.96875,   0.984375],
+  // CHECK: [1,   1.01562,   1.03125,   1.04688,   1.0625,   1.07812,   1.09375,   1.10938],
+  // CHECK: [1.125,   1.14062,   1.15625,   1.17188,   1.1875,   1.20312,   1.21875,   1.23438],
+  // CHECK: [1.25,   1.26562,   1.28125,   1.29688,   1.3125,   1.32812,   1.34375,   1.35938],
+  // CHECK: [1.375,   1.39062,   1.40625,   1.42188,   1.4375,   1.45312,   1.46875,   1.48438],
+  // CHECK: [1.5,   1.51562,   1.53125,   1.54688,   1.5625,   1.57812,   1.59375,   1.60938],
+  // CHECK: [1.625,   1.64062,   1.65625,   1.67188,   1.6875,   1.70312,   1.71875,   1.73438],
+  // CHECK: [1.75,   1.76562,   1.78125,   1.79688,   1.8125,   1.82812,   1.84375,   1.85938],
   // CHECK: [1.875,   1.89062,   1.90625,   1.92188,   1.9375,   1.95312,   1.96875,   1.98438]
 
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
@@ -193,21 +193,21 @@ func.func @main() {
   // Print the result memref after computation.
   // This has been verified against other f16 CUDA implementations.
   call @print_res_as_memref_32(%res) : (!res_memref_type) -> ()
-  // CHECK: [2.42188,   2.4668,   2.51172,   2.55664,   2.60156,   2.64648,   2.69141,   2.73633], 
-  // CHECK: [6.29688,   6.40625,   6.51172,   6.61719,   6.72656,   6.83594,   6.94141,   7.04688], 
-  // CHECK: [10.1719,   10.3438,   10.5156,   10.6797,   10.8516,   11.0234,   11.1875,   11.3594], 
-  // CHECK: [14.0469,   14.2812,   14.5156,   14.7422,   14.9766,   15.2109,   15.4375,   15.6719], 
-  // CHECK: [17.9219,   18.2188,   18.5156,   18.8125,   19.0938,   19.3906,   19.6875,   19.9844], 
-  // CHECK: [21.7969,   22.1562,   22.5156,   22.875,   23.2188,   23.5781,   23.9375,   24.2969], 
-  // CHECK: [25.6719,   26.0938,   26.5156,   26.9375,   27.3438,   27.7656,   28.1875,   28.6094], 
-  // CHECK: [29.5469,   30.0312,   30.5156,   31,   31.4688,   31.9531,   32.4375,   32.9375], 
-  // CHECK: [33.4375,   33.9688,   34.5,   35.0625,   35.5938,   36.1562,   36.6875,   37.25], 
-  // CHECK: [37.3125,   37.9062,   38.5,   39.125,   39.7188,   40.3438,   40.9375,   41.5625], 
-  // CHECK: [41.1875,   41.8438,   42.5,   43.1875,   43.8438,   44.5312,   45.1875,   45.875], 
-  // CHECK: [45.0625,   45.7812,   46.5,   47.25,   47.9688,   48.7188,   49.4375,   50.1875], 
-  // CHECK: [48.9375,   49.7188,   50.5,   51.3125,   52.0938,   52.9062,   53.6875,   54.5], 
-  // CHECK: [52.8125,   53.6562,   54.5,   55.375,   56.2188,   57.0938,   57.9375,   58.8125], 
-  // CHECK: [56.6875,   57.5938,   58.5,   59.4375,   60.3438,   61.2812,   62.1875,   63.125], 
+  // CHECK: [2.42188,   2.4668,   2.51172,   2.55664,   2.60156,   2.64648,   2.69141,   2.73633],
+  // CHECK: [6.29688,   6.40625,   6.51172,   6.61719,   6.72656,   6.83594,   6.94141,   7.04688],
+  // CHECK: [10.1719,   10.3438,   10.5156,   10.6797,   10.8516,   11.0234,   11.1875,   11.3594],
+  // CHECK: [14.0469,   14.2812,   14.5156,   14.7422,   14.9766,   15.2109,   15.4375,   15.6719],
+  // CHECK: [17.9219,   18.2188,   18.5156,   18.8125,   19.0938,   19.3906,   19.6875,   19.9844],
+  // CHECK: [21.7969,   22.1562,   22.5156,   22.875,   23.2188,   23.5781,   23.9375,   24.2969],
+  // CHECK: [25.6719,   26.0938,   26.5156,   26.9375,   27.3438,   27.7656,   28.1875,   28.6094],
+  // CHECK: [29.5469,   30.0312,   30.5156,   31,   31.4688,   31.9531,   32.4375,   32.9375],
+  // CHECK: [33.4375,   33.9688,   34.5,   35.0625,   35.5938,   36.1562,   36.6875,   37.25],
+  // CHECK: [37.3125,   37.9062,   38.5,   39.125,   39.7188,   40.3438,   40.9375,   41.5625],
+  // CHECK: [41.1875,   41.8438,   42.5,   43.1875,   43.8438,   44.5312,   45.1875,   45.875],
+  // CHECK: [45.0625,   45.7812,   46.5,   47.25,   47.9688,   48.7188,   49.4375,   50.1875],
+  // CHECK: [48.9375,   49.7188,   50.5,   51.3125,   52.0938,   52.9062,   53.6875,   54.5],
+  // CHECK: [52.8125,   53.6562,   54.5,   55.375,   56.2188,   57.0938,   57.9375,   58.8125],
+  // CHECK: [56.6875,   57.5938,   58.5,   59.4375,   60.3438,   61.2812,   62.1875,   63.125],
   // CHECK: [60.5625,   61.5312,   62.5,   63.5,   64.5,   65.4375,   66.4375,   67.4375]
 
   return
@@ -215,10 +215,12 @@ func.func @main() {
 
 func.func private @printMemrefF32(memref<*xf32>)
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.nvgpu.rewrite_matmul_as_mma_sync %matmul 
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.nvgpu.rewrite_matmul_as_mma_sync %matmul
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
index 21f3a76c82af1f3..4ea72f3b82c2640 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
@@ -1,15 +1,15 @@
 // RUN: mlir-opt %s \
-// RUN:   -test-transform-dialect-interpreter \
+// RUN:   -transform-interpreter \
 // RUN: | FileCheck %s --check-prefix=CHECK-MMA-SYNC
 
 // CHECK-MMA-SYNC-LABEL: func @main() {
-//       CHECK-MMA-SYNC:   nvgpu.mma.sync(%{{.*}}) {mmaShape = [16, 8, 4], tf32Enabled} 
+//       CHECK-MMA-SYNC:   nvgpu.mma.sync(%{{.*}}) {mmaShape = [16, 8, 4], tf32Enabled}
 //  CHECK-MMA-SYNC-SAME:     : (vector<2x1xf32>, vector<1x1xf32>, vector<2x2xf32>) -> vector<2x2xf32>
 
 // Tested to run locally in 1.7s.
 
 // RUN: mlir-opt %s \
-// RUN:   -test-transform-dialect-interpreter \
+// RUN:   -transform-interpreter \
 // RUN:   -test-transform-dialect-erase-schedule \
 // RUN:   -test-lower-to-nvvm="kernel-index-bitwidth=32 cubin-chip=sm_80 cubin-features=+ptx76 cubin-format=%gpu_compilation_format" \
 // RUN: | mlir-cpu-runner \
@@ -154,10 +154,12 @@ func.func @main() {
 
 func.func private @printMemrefF32(memref<*xf32>)
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.nvgpu.rewrite_matmul_as_mma_sync %matmul 
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.nvgpu.rewrite_matmul_as_mma_sync %matmul
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir b/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir
index 9f1b2ce005a2869..f83f65bb2963cae 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s \
-// RUN:     -test-transform-dialect-interpreter \
+// RUN:     -transform-interpreter \
 // RUN:     -test-transform-dialect-erase-schedule \
 // RUN:     -convert-nvgpu-to-nvvm -gpu-kernel-outlining \
 // RUN:     -convert-scf-to-cf -convert-nvvm-to-llvm \
@@ -26,7 +26,7 @@
 // CHECK-PTX: mbarrier.try_wait.parity.shared.b64
 
 // RUN: mlir-opt %s \
-// RUN:     -test-transform-dialect-interpreter \
+// RUN:     -transform-interpreter \
 // RUN:     -test-transform-dialect-erase-schedule \
 // RUN:     -convert-nvgpu-to-nvvm -gpu-kernel-outlining \
 // RUN:     -convert-scf-to-cf -convert-nvvm-to-llvm \
@@ -83,7 +83,7 @@ func.func @main() {
   %memref_1, %asyncToken_2 = gpu.alloc async [%0] () : memref<8x128xf32>
   %1 = gpu.memcpy async [%0] %memref, %alloc : memref<64x8xf32>, memref<64x8xf32>
   %2 = gpu.memcpy async [%0] %memref_1, %alloc_0 : memref<8x128xf32>, memref<8x128xf32>
-  
+
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
             threads(%tx, %ty, %tz) in (%block_x = %c128, %block_y = %c1, %block_z = %c1) {
     %out = memref.get_global @bufferLhsGlobal : memref<64x8xf32, 3>
@@ -101,14 +101,16 @@ func.func @main() {
     }
     gpu.terminator
   }
-  
+
   return
 }
 
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %copy = transform.structured.match ops{["linalg.copy"]} in %arg1 
-    : (!transform.any_op) -> !transform.any_op
-  transform.nvgpu.rewrite_copy_as_tma %copy 
-    : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %copy = transform.structured.match ops{["linalg.copy"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+    transform.nvgpu.rewrite_copy_as_tma %copy
+      : (!transform.any_op) -> ()
+    transform.yield
+  }
 }


