[Mlir-commits] [mlir] 4519ca3 - [mlir][Linalg] NFC - Drop Linalg EDSC usage

Nicolas Vasilache llvmlistbot at llvm.org
Thu May 20 08:34:03 PDT 2021


Author: Nicolas Vasilache
Date: 2021-05-20T15:33:56Z
New Revision: 4519ca3d2e56fee3d9b83a8228db5d5605680d4a

URL: https://github.com/llvm/llvm-project/commit/4519ca3d2e56fee3d9b83a8228db5d5605680d4a
DIFF: https://github.com/llvm/llvm-project/commit/4519ca3d2e56fee3d9b83a8228db5d5605680d4a.diff

LOG: [mlir][Linalg] NFC - Drop Linalg EDSC usage

Drop the Linalg dialect EDSC subdirectory and update all uses.

Differential Revision: https://reviews.llvm.org/D102848
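
For downstream users the change is mechanical: region builders and other
callbacks that previously emitted ops through EDSC's implicit ScopedContext
now receive an explicit ImplicitLocOpBuilder. A minimal sketch of the
pattern, modeled on the CopyOp::regionBuilder update in this diff (the
free-standing function and its name are illustrative only, not part of the
commit):

    #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
    #include "mlir/IR/ImplicitLocOpBuilder.h"

    // Before (EDSC, removed by this commit):
    //   mlir::edsc::ScopedContext scope(builder, loc);
    //   linalg_yield(block.getArgument(0));

    // After: the builder carries the insertion point and location, so the
    // terminator is created directly on it.
    static void copyRegionBuilder(mlir::ImplicitLocOpBuilder &b,
                                  mlir::Block &block,
                                  mlir::ValueRange captures) {
      b.create<mlir::linalg::YieldOp>(block.getArgument(0));
    }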

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h
    mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
    mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc
    mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
    mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgTypes.h
    mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
    mlir/lib/CAPI/Dialect/Linalg.cpp
    mlir/lib/Dialect/Linalg/CMakeLists.txt
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
    mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
    mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
    mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
    mlir/lib/Dialect/Linalg/Utils/CMakeLists.txt
    mlir/test/mlir-linalg-ods-gen/test-linalg-ods-gen.tc
    mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
    mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp
    mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp

Removed: 
    mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
    mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
    mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
    mlir/lib/Dialect/Linalg/EDSC/CMakeLists.txt


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h b/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
deleted file mode 100644
index 43dff8150f77..000000000000
--- a/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
+++ /dev/null
@@ -1,231 +0,0 @@
-//===- Builders.h - MLIR Declarative Linalg Builders ------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Provides intuitive composable interfaces for building structured MLIR
-// snippets in a declarative fashion.
-//
-//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_LINALG_EDSC_BUILDERS_H_
-#define MLIR_DIALECT_LINALG_EDSC_BUILDERS_H_
-
-#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
-#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
-#include "mlir/EDSC/Builders.h"
-#include "mlir/IR/AffineExpr.h"
-#include "mlir/IR/Builders.h"
-
-namespace mlir {
-class AffineForOp;
-class BlockArgument;
-
-namespace scf {
-class ParallelOp;
-} // namespace scf
-
-namespace edsc {
-inline void defaultRegionBuilder(ValueRange args) {}
-
-/// Build a `linalg.generic` op with the specified `inputs`, `outputs`,
-/// `resultTensorTypes` and `region`.
-///
-/// `otherValues` and `otherAttributes` may be passed and will be appended as
-/// operands and attributes respectively.
-///
-/// Prerequisites:
-/// =============
-///
-/// 1. `inputs` may contain StructuredIndexed that capture either buffer or
-/// tensor values.
-/// 2. `outputs` may contain StructuredIndexed that capture either buffer or
-/// tensor values. In the future this will be extended with ranked shape values.
-/// 3. `resultTensorTypes` may contain return tensor types.
-Operation *makeGenericLinalgOp(
-    ArrayRef<IteratorType> iteratorTypes, ArrayRef<StructuredIndexed> inputs,
-    ArrayRef<StructuredIndexed> outputs, TypeRange resultTensorTypes,
-    function_ref<void(ValueRange)> regionBuilder = defaultRegionBuilder,
-    ArrayRef<Value> otherValues = {}, ArrayRef<Attribute> otherAttributes = {});
-
-namespace ops {
-using edsc::StructuredIndexed;
-
-//===----------------------------------------------------------------------===//
-// EDSC builders for linalg generic operations.
-//===----------------------------------------------------------------------===//
-
-/// Build the body of a region to compute a scalar multiply, under the current
-/// ScopedContext, at the current insert point.
-void mulRegionBuilder(ValueRange args);
-
-/// Build the body of a region to compute a scalar multiply-accumulate, under
-/// the current ScopedContext, at the current insert point.
-void macRegionBuilder(ValueRange args);
-
-/// TODO: In the future we should tie these implementations to something in
-/// Tablegen that generates the proper interfaces and the proper sugared named
-/// ops.
-
-/// Build a linalg.pointwise, under the current ScopedContext, at the current
-/// insert point, that computes:
-/// ```
-///    (i0, ..., in) = (par, ..., par)
-///    |
-///    |  O...(some_subset...(i0, ..., in)) =
-///    |    some_pointwise_func...(I...(some_other_subset...(i0, ..., in)))
-/// ```
-///
-/// This is a very generic entry point that can be configured in many ways to
-/// build a perfect loop nest of parallel loops with arbitrarily complex
-/// innermost loop code and whatever (explicit) broadcast semantics.
-///
-/// This can be used with both out-of-place and in-place semantics.
-/// The client is responsible for ensuring the region operations are compatible
-/// with in-place semantics and parallelism.
-
-/// Unary pointwise operation (with broadcast) entry point.
-using UnaryPointwiseOpBuilder = function_ref<Value(Value)>;
-Operation *linalg_generic_pointwise(UnaryPointwiseOpBuilder unaryOp,
-                                    StructuredIndexed I, StructuredIndexed O);
-
-/// Build a linalg.pointwise with all `parallel` iterators and a region that
-/// computes `O = tanh(I)`. The client is responsible for specifying the proper
-/// indexings when creating the StructuredIndexed.
-Operation *linalg_generic_pointwise_tanh(StructuredIndexed I,
-                                         StructuredIndexed O);
-
-/// Binary pointwise operation (with broadcast) entry point.
-using BinaryPointwiseOpBuilder = function_ref<Value(Value, Value)>;
-Operation *linalg_generic_pointwise(BinaryPointwiseOpBuilder binaryOp,
-                                    StructuredIndexed I1, StructuredIndexed I2,
-                                    StructuredIndexed O);
-
-/// Build a linalg.pointwise with all `parallel` iterators and a region that
-/// computes `O = I1 + I2`. The client is responsible for specifying the proper
-/// indexings when creating the StructuredIndexed.
-Operation *linalg_generic_pointwise_add(StructuredIndexed I1,
-                                        StructuredIndexed I2,
-                                        StructuredIndexed O);
-
-/// Build a linalg.pointwise with all `parallel` iterators and a region that
-/// computes `O = max(I1, I2)`. The client is responsible for specifying the
-/// proper indexings when creating the StructuredIndexed.
-Operation *linalg_generic_pointwise_max(StructuredIndexed I1,
-                                        StructuredIndexed I2,
-                                        StructuredIndexed O);
-
-// TODO: Implement more useful pointwise operations on a per-need basis.
-
-using MatmulRegionBuilder = function_ref<void(ValueRange args)>;
-
-/// Build a linalg.generic, under the current ScopedContext, at the current
-/// insert point, that computes:
-/// ```
-///    (m, n, k) = (par, par, seq)
-///    |
-///    |  C(m, n) += A(m, k) * B(k, n)
-/// ```
-Operation *
-linalg_generic_matmul(Value vA, Value vB, Value vC,
-                      MatmulRegionBuilder regionBuilder = macRegionBuilder);
-
-/// Build a linalg.generic, under the current ScopedContext, at the current
-/// insert point, that computes:
-/// ```
-///    (m, n, k) = (par, par, seq)
-///    |
-///    |  D(m, n) = C(m, n) + sum_k(A(m, k) * B(k, n))
-/// ```
-/// and returns the tensor `D`.
-Operation *
-linalg_generic_matmul(Value vA, Value vB, Value vC, RankedTensorType tD,
-                      MatmulRegionBuilder regionBuilder = macRegionBuilder);
-
-template <typename Container>
-Operation *
-linalg_generic_matmul(Container values,
-                      MatmulRegionBuilder regionBuilder = macRegionBuilder) {
-  assert(values.size() == 3 && "Expected exactly 3 values");
-  return linalg_generic_matmul(values[0], values[1], values[2], regionBuilder);
-}
-
-/// Build a linalg.generic, under the current ScopedContext, at the current
-/// insert point, that computes:
-/// ```
-///    (batch, f, [h, w, ...], [kh, kw, ...], c) =
-///    |  (par, par, [par, par, ...], [red, red, ...], red)
-///    |
-///    | O(batch, [h, w, ...], f) +=
-///    |   I(batch,
-///    |     [
-///    |       stride[0] * h + dilations[0] * kh,
-///    |       stride[1] * w + dilations[1] * kw, ...
-///    |     ],
-///    |     c)
-///    |   *
-///    |   W([kh, kw, ...], c, f)
-/// ```
-/// If `dilations` or `strides` are left empty, the default value of `1` is used
-/// along each relevant dimension.
-///
-/// For now `...` must be empty (i.e. only 2-D convolutions are supported).
-///
-// TODO: Extend convolution rank with some template magic.
-Operation *linalg_generic_conv_nhwc(Value vI, Value vW, Value vO,
-                                    ArrayRef<int> strides = {},
-                                    ArrayRef<int> dilations = {});
-
-template <typename Container>
-Operation *linalg_generic_conv_nhwc(Container values,
-                                    ArrayRef<int> strides = {},
-                                    ArrayRef<int> dilations = {}) {
-  assert(values.size() == 3 && "Expected exactly 3 values");
-  return linalg_generic_conv_nhwc(values[0], values[1], values[2], strides,
-                                  dilations);
-}
-
-/// Build a linalg.generic, under the current ScopedContext, at the current
-/// insert point, that computes:
-/// ```
-///    (batch, dm, c, [h, w, ...], [kh, kw, ...]) =
-///    |  (par, par, par, [par, par, ...], [red, red, ...])
-///    |
-///    | O(batch, [h, w, ...], c * depth_multiplier) +=
-///    |   I(batch,
-///    |     [
-///    |       stride[0] * h + dilations[0] * kh,
-///    |       stride[1] * w + dilations[1] * kw, ...
-///    |     ],
-///    |     c)
-///    |   *
-///    |   W([kh, kw, ...], c, depth_multiplier)
-/// ```
-/// If `dilations` or `strides` are left empty, the default value of `1` is used
-/// along each relevant dimension.
-///
-/// For now `...` must be empty (i.e. only 2-D convolutions are supported).
-///
-// TODO: Extend convolution rank with some template magic.
-Operation *linalg_generic_dilated_conv_nhwc(Value vI, Value vW, Value vO,
-                                            int depth_multiplier = 1,
-                                            ArrayRef<int> strides = {},
-                                            ArrayRef<int> dilations = {});
-
-template <typename Container>
-Operation *linalg_generic_dilated_conv_nhwc(Container values,
-                                            int depth_multiplier,
-                                            ArrayRef<int> strides = {},
-                                            ArrayRef<int> dilations = {}) {
-  assert(values.size() == 3 && "Expected exactly 3 values");
-  return linalg_generic_dilated_conv_nhwc(values[0], values[1], values[2],
-                                          depth_multiplier, strides, dilations);
-}
-
-} // namespace ops
-} // namespace edsc
-} // namespace mlir
-
-#endif // MLIR_DIALECT_LINALG_EDSC_BUILDERS_H_

diff --git a/mlir/include/mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h b/mlir/include/mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h
index 262b6804ab8f..e69de29bb2d1 100644
--- a/mlir/include/mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h
+++ b/mlir/include/mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h
@@ -1,26 +0,0 @@
-//===- FoldedIntrinsics.h - MLIR EDSC Intrinsics for Linalg -----*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_LINALG_EDSC_FOLDEDINTRINSICS_H_
-#define MLIR_DIALECT_LINALG_EDSC_FOLDEDINTRINSICS_H_
-
-#include "mlir/Dialect/Linalg/EDSC/Builders.h"
-#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
-#include "mlir/Dialect/Math/IR/Math.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
-#include "mlir/Dialect/Tensor/IR/Tensor.h"
-
-#include "mlir/Transforms/FoldUtils.h"
-
-namespace mlir {
-namespace edsc {
-namespace intrinsics {
-} // namespace intrinsics
-} // namespace edsc
-} // namespace mlir
-
-#endif // MLIR_DIALECT_LINALG_EDSC_FOLDEDINTRINSICS_H_

diff --git a/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h b/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
deleted file mode 100644
index 4ebbb93cca5d..000000000000
--- a/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
+++ /dev/null
@@ -1,33 +0,0 @@
-//===- Intrinsics.h - MLIR EDSC Intrinsics for Linalg -----------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_LINALG_EDSC_INTRINSICS_H_
-#define MLIR_DIALECT_LINALG_EDSC_INTRINSICS_H_
-
-#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
-#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
-
-namespace mlir {
-namespace edsc {
-namespace intrinsics {
-
-using linalg_copy = OperationBuilder<linalg::CopyOp>;
-using linalg_dot = OperationBuilder<linalg::DotOp>;
-using linalg_fill = OperationBuilder<linalg::FillOp>;
-using linalg_init_tensor = ValueBuilder<linalg::InitTensorOp>;
-using linalg_matmul = OperationBuilder<linalg::MatmulOp>;
-using linalg_matvec = OperationBuilder<linalg::MatvecOp>;
-using linalg_vecmat = OperationBuilder<linalg::VecmatOp>;
-using linalg_range = ValueBuilder<linalg::RangeOp>;
-using linalg_reshape = ValueBuilder<linalg::ReshapeOp>;
-using linalg_yield = OperationBuilder<linalg::YieldOp>;
-
-} // namespace intrinsics
-} // namespace edsc
-} // namespace mlir
-
-#endif // MLIR_DIALECT_LINALG_EDSC_INTRINSICS_H_

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
index 307eebf6ddf5..6dabb6de2cfb 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
@@ -48,7 +48,8 @@ def Linalg_Dialect : Dialect {
     constexpr const static ::llvm::StringLiteral
       kInplaceableAttrName = "linalg.inplaceable";
 
-    using RegionBuilderFunType = llvm::function_ref<void(Block &, ValueRange)>;
+    using RegionBuilderFunType =
+      llvm::function_ref<void(ImplicitLocOpBuilder &b, Block &, ValueRange)>;
     RegionBuilderFunType getRegionBuilder(StringRef name) {
       return namedStructuredOpRegionBuilders.lookup(name);
     }

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
index fb419d0623ae..68491d8aad88 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
@@ -17,6 +17,7 @@
 #include "mlir/IR/AffineMap.h"
 #include "mlir/IR/BlockAndValueMapping.h"
 #include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/ImplicitLocOpBuilder.h"
 #include "mlir/IR/OpDefinition.h"
 #include "mlir/Interfaces/ViewLikeInterface.h"
 

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index b7f56edff4fd..91bccdb23956 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -1233,7 +1233,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
         Returns a null function if this named op does not define a region
         builder.
       }],
-      /*retTy=*/"std::function<void(Block &, ValueRange)>",
+      /*retTy=*/"std::function<void(ImplicitLocOpBuilder &, Block &, ValueRange)>",
       /*methodName=*/"getRegionBuilder",
       (ins),
       [{ return ConcreteOp::getRegionBuilder(); }]

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc
index e0fc25992a49..a9299a41e081 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc
@@ -1,7 +1,7 @@
 ods_def<MatmulColumnMajorOp>
 implements_interface<LinalgContractionOpInterface> :
 def matmul_column_major(A: f32(K, M), B: f32(N, K)) -> (C: f32(N, M)) {
-  C(n, m) = std_addf<k>(C(n, m), std_mulf(A(k, m), B(n, k)));
+  C(n, m) = AddFOp<k>(C(n, m), MulFOp(A(k, m), B(n, k)));
 }
 
 ods_def<MatmulI8I8I32Op>
@@ -9,146 +9,146 @@ implements_interface<LinalgContractionOpInterface> :
 def matmul_i8_i8_i32(A: i8(M, K), B: i8(K, N)) -> (C: i32(M, N)) {
   // TODO: ideally something closer to
   //   C(m, n) += cast<i32>(A(m, k)) * cast<i32>(B(k, n))
-  C(m, n) = std_addi<k>(C(m, n), std_muli(std_sexti32(A(m, k)), std_sexti32(B(k, n))));
+  C(m, n) = AddIOp<k>(C(m, n), MulIOp(SignExtendIOp32(A(m, k)), SignExtendIOp32(B(k, n))));
 }
 
 ods_def<MatmulI16I16I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def matmul_i16_i16_i32(A: i16(M, K), B: i16(K, N)) -> (C: i32(M, N)) {
-  C(m, n) = std_addi<k>(C(m, n), std_muli(std_sexti32(A(m, k)), std_sexti32(B(k, n))));
+  C(m, n) = AddIOp<k>(C(m, n), MulIOp(SignExtendIOp32(A(m, k)), SignExtendIOp32(B(k, n))));
 }
 
 ods_def<MatmulI32I32I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def matmul_i32_i32_i32(A: i32(M, K), B: i32(K, N)) -> (C: i32(M, N)) {
-  C(m, n) = std_addi<k>(C(m, n), std_muli(A(m, k), B(k, n)));
+  C(m, n) = AddIOp<k>(C(m, n), MulIOp(A(m, k), B(k, n)));
 }
 
 ods_def<MatvecI8I8I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def matvec_i8_i8_i32(A: i8(M, N), y: i8(N)) -> (x: i32(M)) {
-  x(m) = std_addi<n>(x(m), std_muli(std_sexti32(A(m, n)), std_sexti32(y(n))));
+  x(m) = AddIOp<n>(x(m), MulIOp(SignExtendIOp32(A(m, n)), SignExtendIOp32(y(n))));
 }
 
 ods_def<MatvecI16I16I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def matvec_i16_i16_i32(A: i16(M, N), y: i16(N)) -> (x: i32(M)) {
-  x(m) = std_addi<n>(x(m), std_muli(std_sexti32(A(m, n)), std_sexti32(y(n))));
+  x(m) = AddIOp<n>(x(m), MulIOp(SignExtendIOp32(A(m, n)), SignExtendIOp32(y(n))));
 }
 
 ods_def<MatvecI32I32I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def matvec_i32_i32_i32(A: i32(M, N), y: i32(N)) -> (x: i32(M)) {
-  x(m) = std_addi<n>(x(m), std_muli(A(m, n), y(n)));
+  x(m) = AddIOp<n>(x(m), MulIOp(A(m, n), y(n)));
 }
 
 ods_def<VecmatI8I8I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def vecmat_i8_i8_i32(y: i8(M), A: i8(M, N)) -> (x: i32(N)) {
-  x(n) = std_addi<m>(x(n), std_muli(std_sexti32(y(m)), std_sexti32(A(m, n))));
+  x(n) = AddIOp<m>(x(n), MulIOp(SignExtendIOp32(y(m)), SignExtendIOp32(A(m, n))));
 }
 
 ods_def<VecmatI16I16I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def vecmat_i16_i16_i32(y: i16(M), A: i16(M, N)) -> (x: i32(N)) {
-  x(n) = std_addi<m>(x(n), std_muli(std_sexti32(y(m)), std_sexti32(A(m, n))));
+  x(n) = AddIOp<m>(x(n), MulIOp(SignExtendIOp32(y(m)), SignExtendIOp32(A(m, n))));
 }
 
 ods_def<VecmatI32I32I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def vecmat_i32_i32_i32(y: i32(M), A: i32(M, N)) -> (x: i32(N)) {
-  x(n) = std_addi<m>(x(n), std_muli(y(m), A(m, n)));
+  x(n) = AddIOp<m>(x(n), MulIOp(y(m), A(m, n)));
 }
 
 ods_def<DotI8I8I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def dot_i8_i8_i32(A: i8(M), B: i8(M)) -> (C: i32()) {
-  C() = std_addi<m>(C(), std_muli(std_sexti32(A(m)), std_sexti32(B(m))));
+  C() = AddIOp<m>(C(), MulIOp(SignExtendIOp32(A(m)), SignExtendIOp32(B(m))));
 }
 
 ods_def<DotI16I16I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def dot_i16_i16_i32(A: i16(M), B: i16(M)) -> (C: i32()) {
-  C() = std_addi<m>(C(), std_muli(std_sexti32(A(m)), std_sexti32(B(m))));
+  C() = AddIOp<m>(C(), MulIOp(SignExtendIOp32(A(m)), SignExtendIOp32(B(m))));
 }
 
 ods_def<DotI32I32I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def dot_i32_i32_i32(A: i32(M), B: i32(M)) -> (C: i32()) {
-  C() = std_addi<m>(C(), std_muli(A(m), B(m)));
+  C() = AddIOp<m>(C(), MulIOp(A(m), B(m)));
 }
 
 ods_def<BatchMatmulI8I8I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def batch_matmul_i8_i8_i32(A: i8(Batch, M, K), B: i8(Batch, K, N)) -> (C: i32(Batch, M, N)) {
   C(b, m, n) =
-      std_addi<k>(C(b, m, n), std_muli(std_sexti32(A(b, m, k)), std_sexti32(B(b, k, n))));
+      AddIOp<k>(C(b, m, n), MulIOp(SignExtendIOp32(A(b, m, k)), SignExtendIOp32(B(b, k, n))));
 }
 
 ods_def<BatchMatmulI16I16I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def batch_matmul_i16_i16_i32(A: i16(Batch, M, K), B: i16(Batch, K, N)) -> (C: i32(Batch, M, N)) {
   C(b, m, n) =
-      std_addi<k>(C(b, m, n), std_muli(std_sexti32(A(b, m, k)), std_sexti32(B(b, k, n))));
+      AddIOp<k>(C(b, m, n), MulIOp(SignExtendIOp32(A(b, m, k)), SignExtendIOp32(B(b, k, n))));
 }
 
 
 ods_def<BatchMatmulI32I32I32Op>
 implements_interface<LinalgContractionOpInterface> :
 def batch_matmul_i32_i32_i32(A: i32(Batch, M, K), B: i32(Batch, K, N)) -> (C: i32(Batch, M, N)) {
-  C(b, m, n) = std_addi<k>(C(b, m, n), std_muli(A(b, m, k), B(b, k, n)));
+  C(b, m, n) = AddIOp<k>(C(b, m, n), MulIOp(A(b, m, k), B(b, k, n)));
 }
 
 ods_def<ConvWOp>:
 def conv_1d(I: f32(W), K: f32(KW)) -> (O: f32(W)) {
-  O(w) = std_addf<kw>(O(w), std_mulf(I(w + kw), K(kw)));
+  O(w) = AddFOp<kw>(O(w), MulFOp(I(w + kw), K(kw)));
 }
 
 ods_def<ConvNWCOp>:
 def conv_1d_nwc(I: f32(N, W, C), K: f32(F, KW, C)) -> (O: f32(N, W, F)) {
-  O(n, w, f) = std_addf<kw>(O(n, w, f), std_mulf(I(n, w + kw, c), K(f, kw, c)));
+  O(n, w, f) = AddFOp<kw>(O(n, w, f), MulFOp(I(n, w + kw, c), K(f, kw, c)));
 }
 
 ods_def<ConvNCWOp>:
 def conv_1d_ncw(I: f32(N, C, W), K: f32(F, C, KW)) -> (O: f32(N, F, W)) {
-  O(n, f, w) = std_addf<kw>(O(n, f, w), std_mulf(I(n, c, w + kw), K(f, c, kw)));
+  O(n, f, w) = AddFOp<kw>(O(n, f, w), MulFOp(I(n, c, w + kw), K(f, c, kw)));
 }
 
 ods_def<ConvHWOp>:
 def conv_2d(I: f32(H, W), K: f32(KH, KW)) -> (O: f32(H, W)) {
-  O(h, w) = std_addf<kh, kw>(O(h, w), std_mulf(I(h + kh, w + kw), K(kh, kw)));
+  O(h, w) = AddFOp<kh, kw>(O(h, w), MulFOp(I(h + kh, w + kw), K(kh, kw)));
 }
 
 ods_def<ConvNHWCOp>:
 def conv_2d_nhwc(I: f32(N, H, W, C), K: f32(F, KH, KW, C)) -> (O: f32(N, H, W, F)) {
-  O(n, h, w, f) = std_addf<kh, kw>(
-      O(n, h, w, f), std_mulf(I(n, h + kh, w + kw, c), K(f, kh, kw, c)));
+  O(n, h, w, f) = AddFOp<kh, kw>(
+      O(n, h, w, f), MulFOp(I(n, h + kh, w + kw, c), K(f, kh, kw, c)));
 }
 
 ods_def<ConvNCHWOp>:
 def conv_2d_nchw(I: f32(N, C, H, W), K: f32(F, C, KH, KW)) -> (O: f32(N, F, H, W)) {
-  O(n, f, h, w) = std_addf<kh, kw>(
-      O(n, f, h, w), std_mulf(I(n, c, h + kh, w + kw), K(f, c, kh, kw)));
+  O(n, f, h, w) = AddFOp<kh, kw>(
+      O(n, f, h, w), MulFOp(I(n, c, h + kh, w + kw), K(f, c, kh, kw)));
 }
 
 ods_def<ConvDHWOp>:
 def conv_3d(I: f32(D, H, W), K: f32(KD, KH, KW)) -> (O: f32(D, H, W)) {
-  O(d, h, w) = std_addf<kd, kh, kw>(
-      O(d, h, w), std_mulf(I(d + kd, h + kh, w + kw), K(kd, kh, kw)));
+  O(d, h, w) = AddFOp<kd, kh, kw>(
+      O(d, h, w), MulFOp(I(d + kd, h + kh, w + kw), K(kd, kh, kw)));
 }
 
 ods_def<ConvNDHWCOp>:
 def conv_3d_ndhwc(I: f32(N, D, H, W, C), K: f32(F, KD, KH, KW, C)) -> (O: f32(N, D, H, W, F)) {
-  O(n, d, h, w, f) = std_addf<kd, kh, kw>(
+  O(n, d, h, w, f) = AddFOp<kd, kh, kw>(
       O(n, d, h, w, f),
-      std_mulf(I(n, d + kd, h + kh, w + kw, c), K(f, kd, kh, kw, c)));
+      MulFOp(I(n, d + kd, h + kh, w + kw, c), K(f, kd, kh, kw, c)));
 }
 
 ods_def<ConvNCDHWOp>:
 def conv_3d_ncdhw(I: f32(N, C, D, H, W), K: f32(F, C, KD, KH, KW)) -> (O: f32(N, F, D, H, W)) {
-  O(n, f, d, h, w) = std_addf<kd, kh, kw>(
+  O(n, f, d, h, w) = AddFOp<kd, kh, kw>(
       O(n, f, d, h, w),
-      std_mulf(I(n, c, d + kd, h + kh, w + kw), K(f, c, kd, kh, kw)));
+      MulFOp(I(n, c, d + kd, h + kh, w + kw), K(f, c, kd, kh, kw)));
 }
 
 ods_def<DepthwiseConvInputNHWCFilterHWCFOp>:
@@ -162,9 +162,9 @@ This operation performs depth-wise 2-D convolution over an input `I` and filter
 `F` and generates output `O` using the following computation:
 
 ```
-  O(n, oh, ow, ci, co) = std_addf<kh, kw>(
+  O(n, oh, ow, ci, co) = AddFOp<kh, kw>(
       O(n, oh, ow, ci, co),
-      std_mulf(I(n, oh * strides[0] + kh, ow * strides[1] + kw, ci),
+      MulFOp(I(n, oh * strides[0] + kh, ow * strides[1] + kw, ci),
                K(kh, kw, ci, co)));
 ```
 
@@ -184,9 +184,9 @@ to 4D result as DepthwiseConvInputNHWCFilterHWCOp, you will have to create a
 Linalg reshape op which collapses `CI` and `CO` into one dimension.
 """
 {
-  O(n, oh, ow, ci, co) = std_addf<kh, kw>(
+  O(n, oh, ow, ci, co) = AddFOp<kh, kw>(
       O(n, oh, ow, ci, co),
-      std_mulf(I(n, oh * strides[0] + kh, ow * strides[1] + kw, ci),
+      MulFOp(I(n, oh * strides[0] + kh, ow * strides[1] + kw, ci),
                K(kh, kw, ci, co)));
 }
 
@@ -201,9 +201,9 @@ This operation performs depth-wise 2-D convolution over an input `I` and filter
 `F` and generates output `O` using the following computation:
 
 ```
-O(n, oh, ow, c) = std_addf<kh, kw>(
+O(n, oh, ow, c) = AddFOp<kh, kw>(
     O(n, oh, ow, c),
-    std_mulf(I(n, oh * strides[0] + kh, ow * strides[1] + kw, c),
+    MulFOp(I(n, oh * strides[0] + kh, ow * strides[1] + kw, c),
              K(kh, kw, c)));
 ```
 
@@ -221,9 +221,9 @@ order of (`N`, `OH`, `OW`, `C`, `KH`, `KW`).
 Note: this op only supports channel multiplier == 1.
 """
 {
-  O(n, oh, ow, c) = std_addf<kh, kw>(
+  O(n, oh, ow, c) = AddFOp<kh, kw>(
       O(n, oh, ow, c),
-      std_mulf(I(n, oh * strides[0] + kh, ow * strides[1] + kw, c),
+      MulFOp(I(n, oh * strides[0] + kh, ow * strides[1] + kw, c),
                K(kh, kw, c)));
 }
 
@@ -239,9 +239,9 @@ The indexing maps for these three tensors contain 5 dimensions, following the
 order of (`N`, `W`, `F`, `KW`, `C`).
 """
 {
-  O(n, w, f) = std_addf<kw>(
+  O(n, w, f) = AddFOp<kw>(
       O(n, w, f),
-      std_mulf(I(n, w * strides[0] + kw * dilations[0], c), K(kw, c, f)));
+      MulFOp(I(n, w * strides[0] + kw * dilations[0], c), K(kw, c, f)));
 }
 
 ods_def<ConvInputNCWFilterWCFOp>:
@@ -256,9 +256,9 @@ The indexing maps for these three tensors contain 5 dimensions, following the
 order of (`N`, `F`, `W`, `KW`, `C`).
 """
 {
-  O(n, f, w) = std_addf<kw>(
+  O(n, f, w) = AddFOp<kw>(
       O(n, f, w),
-      std_mulf(I(n, c, w * strides[0] + kw * dilations[0]), K(kw, c, f)));
+      MulFOp(I(n, c, w * strides[0] + kw * dilations[0]), K(kw, c, f)));
 }
 
 ods_def<ConvInputNHWCFilterHWCFOp>:
@@ -273,8 +273,8 @@ The indexing maps for these three tensors contain 7 dimensions, following the
 order of (`N`, `H`, `W`, `F`, `KH`, `KW`, `C`).
 """
 {
-  O(n, h, w, f) = std_addf<kh, kw>(
-      O(n, h, w, f), std_mulf(I(n, h * strides[0] + kh * dilations[0],
+  O(n, h, w, f) = AddFOp<kh, kw>(
+      O(n, h, w, f), MulFOp(I(n, h * strides[0] + kh * dilations[0],
                                 w * strides[1] + kw * dilations[1], c),
                               K(kh, kw, c, f)));
 }
@@ -293,8 +293,8 @@ The indexing maps for these three tensors contain 7 dimensions, following the
 order of (`N`, `F`, `H`, `W`, `KH`, `KW`, `C`).
 """
 {
-  O(n, f, h, w) = std_addf<kh, kw>(
-      O(n, f, h, w), std_mulf(I(n, c, h * strides[0] + kh * dilations[0],
+  O(n, f, h, w) = AddFOp<kh, kw>(
+      O(n, f, h, w), MulFOp(I(n, c, h * strides[0] + kh * dilations[0],
                                 w * strides[1] + kw * dilations[1]),
                               K(kh, kw, c, f)));
 }
@@ -313,8 +313,8 @@ The indexing maps for these three tensors contain 9 dimensions, following the
 order of (`N`, `D`, `H`, `W`, `F`, `KD`, `KH`, `KW`, `C`).
 """
 {
-  O(n, d, h, w, f) = std_addf<kd, kh, kw>(
-      O(n, d, h, w, f), std_mulf(I(n, d * strides[0] + kd * dilations[0],
+  O(n, d, h, w, f) = AddFOp<kd, kh, kw>(
+      O(n, d, h, w, f), MulFOp(I(n, d * strides[0] + kd * dilations[0],
                                    h * strides[1] + kh * dilations[1],
                                    w * strides[2] + kw * dilations[2], c),
                                  K(kd, kh, kw, c, f)));
@@ -334,8 +334,8 @@ The indexing maps for these three tensors contain 9 dimensions, following the
 order of (`N`, `F`, `D`, `H`, `W`, `KD`, `KH`, `KW`, `C`).
 """
 {
-  O(n, f, d, h, w) = std_addf<kd, kh, kw>(
-      O(n, f, d, h, w), std_mulf(I(n, c, d * strides[0] + kd * dilations[0],
+  O(n, f, d, h, w) = AddFOp<kd, kh, kw>(
+      O(n, f, d, h, w), MulFOp(I(n, c, d * strides[0] + kd * dilations[0],
                                    h * strides[1] + kh * dilations[1],
                                    w * strides[2] + kw * dilations[2]),
                                  K(kd, kh, kw, c, f)));
@@ -347,7 +347,7 @@ def pooling_nhwc_sum
   -> (O: f32(N, OH, OW, C))
   attr(strides: 2xi64, dilations: 2xi64)
 {
-  O(n, oh, ow, c) = std_addf<kh, kw>(O(n, oh, ow, c),
+  O(n, oh, ow, c) = AddFOp<kh, kw>(O(n, oh, ow, c),
                                      I(n, oh * strides[0] + kh * dilations[0],
                                        ow * strides[1] + kw * dilations[1], c));
 }
@@ -359,7 +359,7 @@ def pooling_nhwc_i8_max
   attr(strides: 2xi64, dilations: 2xi64)
 {
   O(n, oh, ow, c) =
-      std_select<kh, kw>(std_cmpi_sgt(I(n, oh * strides[0] + kh * dilations[0],
+      SelectOp<kh, kw>(CmpIOpSGT(I(n, oh * strides[0] + kh * dilations[0],
                                         ow * strides[1] + kw * dilations[1], c),
                                       O(n, oh, ow, c)),
                          I(n, oh * strides[0] + kh * dilations[0],
@@ -374,7 +374,7 @@ def pooling_nhwc_i16_max
   attr(strides: 2xi64, dilations: 2xi64)
 {
   O(n, oh, ow, c) =
-      std_select<kh, kw>(std_cmpi_sgt(I(n, oh * strides[0] + kh * dilations[0],
+      SelectOp<kh, kw>(CmpIOpSGT(I(n, oh * strides[0] + kh * dilations[0],
                                         ow * strides[1] + kw * dilations[1], c),
                                       O(n, oh, ow, c)),
                          I(n, oh * strides[0] + kh * dilations[0],
@@ -389,7 +389,7 @@ def pooling_nhwc_i32_max
   attr(strides: 2xi64, dilations: 2xi64)
 {
   O(n, oh, ow, c) =
-      std_select<kh, kw>(std_cmpi_sgt(I(n, oh * strides[0] + kh * dilations[0],
+      SelectOp<kh, kw>(CmpIOpSGT(I(n, oh * strides[0] + kh * dilations[0],
                                         ow * strides[1] + kw * dilations[1], c),
                                       O(n, oh, ow, c)),
                          I(n, oh * strides[0] + kh * dilations[0],
@@ -404,7 +404,7 @@ def pooling_nhwc_max
   attr(strides: 2xi64, dilations: 2xi64)
 {
   O(n, oh, ow, c) =
-      std_select<kh, kw>(std_cmpf_ogt(I(n, oh * strides[0] + kh * dilations[0],
+      SelectOp<kh, kw>(CmpFOpOGT(I(n, oh * strides[0] + kh * dilations[0],
                                         ow * strides[1] + kw * dilations[1], c),
                                       O(n, oh, ow, c)),
                          I(n, oh * strides[0] + kh * dilations[0],
@@ -419,7 +419,7 @@ def pooling_nhwc_min
   attr(strides: 2xi64, dilations: 2xi64)
 {
   O(n, oh, ow, c) =
-      std_select<kh, kw>(std_cmpf_olt(I(n, oh * strides[0] + kh * dilations[0],
+      SelectOp<kh, kw>(CmpFOpOLT(I(n, oh * strides[0] + kh * dilations[0],
                                         ow * strides[1] + kw * dilations[1], c),
                                       O(n, oh, ow, c)),
                          I(n, oh * strides[0] + kh * dilations[0],

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
index bf5efd58058a..44a912d44b9f 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
@@ -15,7 +15,6 @@
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineMap.h"
 #include "mlir/IR/BlockAndValueMapping.h"
-#include "mlir/IR/Builders.h"
 #include "mlir/IR/BuiltinDialect.h"
 #include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/OpDefinition.h"
@@ -41,7 +40,7 @@ class PoolingSumOp;
// TODO: allow an extra ValueRange to specify an indexing and allow
 // non-hyperrectangular shapes.
 using LoopRangeBuilder =
-    std::function<SmallVector<Range, 4>(OpBuilder &, Location)>;
+    std::function<SmallVector<Range, 4>(ImplicitLocOpBuilder)>;
 
 /// Provide a very simple inference procedure to build the loop ranges from the
 /// op and its operands. This only works with permutation affine maps and

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 8336668883a5..0a1b4adc3aee 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -160,8 +160,10 @@ def CopyOp : LinalgStructured_Op<"copy", [CopyOpInterface]> {
     Value getSource() { return input();}
     Value getTarget() { return output(); }
 
-    static void regionBuilder(Block &block, ValueRange captures);
-    static std::function<void(Block &block, ValueRange captures)>
+    static void regionBuilder(
+      ImplicitLocOpBuilder &b, Block &block, ValueRange captures);
+    static std::function<
+      void(ImplicitLocOpBuilder &b, Block &block, ValueRange captures)>
     getRegionBuilder() {
      return &regionBuilder;
     }
@@ -205,8 +207,10 @@ def FillOp : LinalgStructured_Op<"fill", []> {
           extractOrIdentityMap(llvm::None, getNumParallelLoops(), context)});
     }
 
-    static void regionBuilder(Block &block, ValueRange captures);
-    static std::function<void(Block &block, ValueRange captures)>
+    static void regionBuilder(
+      ImplicitLocOpBuilder &b, Block &block, ValueRange captures);
+    static std::function<
+      void(ImplicitLocOpBuilder &b, Block &block, ValueRange captures)>
     getRegionBuilder() {
      return &regionBuilder;
     }
@@ -295,8 +299,9 @@ class PoolingBase_Op<string mnemonic, list<OpTrait> props>
       return padding().getValue().getValue<int64_t>({i, 1});
     }
 
-    static std::function<void(Block &, ValueRange captures)> getRegionBuilder()
-    {
+    static std::function<
+      void(ImplicitLocOpBuilder &b, Block &block, ValueRange captures)>
+    getRegionBuilder() {
       return nullptr;
     }
   }];
@@ -543,7 +548,9 @@ class GenericOpBase<string mnemonic> : LinalgStructuredBase_Op<mnemonic, [
         library_call()->str() : "op_has_no_registered_library_name";
     }
 
-    static std::function<void(Block &, ValueRange)> getRegionBuilder() {
+    static std::function<
+      void(ImplicitLocOpBuilder &b, Block &block, ValueRange captures)>
+    getRegionBuilder() {
       return nullptr;
     }
   }];
@@ -551,7 +558,6 @@ class GenericOpBase<string mnemonic> : LinalgStructuredBase_Op<mnemonic, [
   let parser = [{ return ::parseGenericOp(parser, result); }];
 }
 
-/// Index-free GenericOp.
 def GenericOp : GenericOpBase<"generic"> {
   let description = [{
     Generic Linalg op form where the key properties of the computation are

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgTypes.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgTypes.h
index d94e43b78edf..af09671e3251 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgTypes.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgTypes.h
@@ -13,6 +13,7 @@
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/IR/Dialect.h"
+#include "mlir/IR/ImplicitLocOpBuilder.h"
 #include "mlir/IR/Types.h"
 #include "llvm/ADT/StringMap.h"
 

diff --git a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
index 33953ee37931..c9adaa4c69eb 100644
--- a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
@@ -11,7 +11,6 @@
 
 #include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
-#include "mlir/Dialect/Linalg/EDSC/Builders.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/MemRef/EDSC/Intrinsics.h"
 #include "mlir/Dialect/SCF/SCF.h"

diff --git a/mlir/lib/CAPI/Dialect/Linalg.cpp b/mlir/lib/CAPI/Dialect/Linalg.cpp
index 6f6e090d737a..21e4e2ce816d 100644
--- a/mlir/lib/CAPI/Dialect/Linalg.cpp
+++ b/mlir/lib/CAPI/Dialect/Linalg.cpp
@@ -8,7 +8,6 @@
 
 #include "mlir-c/Dialect/Linalg.h"
 #include "mlir/CAPI/Registration.h"
-#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 
 using namespace mlir;
@@ -38,12 +37,11 @@ void mlirLinalgFillBuiltinNamedOpRegion(MlirDialect linalgDialect,
   for (auto t : linalgOp.getShapedOperandTypes())
     argTypes.push_back(getElementTypeOrSelf(t));
 
-  OpBuilder b(op->getContext());
+  ImplicitLocOpBuilder b(op->getLoc(), op->getContext());
   Region &region = op->getRegion(0);
   Block *body = b.createBlock(&region, /*insertPt=*/{}, argTypes);
   b.setInsertionPointToStart(body);
-  mlir::edsc::ScopedContext scope(b, op->getLoc());
-  fun(*body, captures);
+  fun(b, *body, captures);
 }
 
 MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(Linalg, linalg, LinalgDialect)

diff --git a/mlir/lib/Dialect/Linalg/CMakeLists.txt b/mlir/lib/Dialect/Linalg/CMakeLists.txt
index 448d93bd0203..35c4201f2145 100644
--- a/mlir/lib/Dialect/Linalg/CMakeLists.txt
+++ b/mlir/lib/Dialect/Linalg/CMakeLists.txt
@@ -1,5 +1,4 @@
 add_subdirectory(Analysis)
-add_subdirectory(EDSC)
 add_subdirectory(IR)
 add_subdirectory(Transforms)
 add_subdirectory(Utils)

diff --git a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp b/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
deleted file mode 100644
index 45017faf38c6..000000000000
--- a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
+++ /dev/null
@@ -1,255 +0,0 @@
-//===- Builders.cpp - MLIR Declarative Linalg Builders --------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "mlir/IR/Builders.h"
-#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
-#include "mlir/Dialect/Linalg/EDSC/Builders.h"
-#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
-#include "mlir/Dialect/Math/EDSC/Intrinsics.h"
-#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
-#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
-#include "mlir/IR/AffineExpr.h"
-
-using namespace mlir;
-using namespace mlir::edsc;
-using namespace mlir::edsc::intrinsics;
-using namespace mlir::linalg;
-using namespace mlir::scf;
-
-Operation *mlir::edsc::makeGenericLinalgOp(
-    ArrayRef<IteratorType> iteratorTypes, ArrayRef<StructuredIndexed> inputs,
-    ArrayRef<StructuredIndexed> outputs, TypeRange resultTensorTypes,
-    function_ref<void(ValueRange)> regionBuilder, ArrayRef<Value> otherValues,
-    ArrayRef<Attribute> otherAttributes) {
-  // Build maps
-  SmallVector<SmallVector<AffineExpr, 4>, 4> exprsList;
-  exprsList.reserve(inputs.size() + outputs.size());
-
-  for (auto container : {inputs, outputs})
-    for (const StructuredIndexed &s : container)
-      exprsList.emplace_back(s.getExprs().begin(), s.getExprs().end());
-  auto maps = AffineMap::inferFromExprList(exprsList);
-
-  SmallVector<Value, 4> inputValues, outputValues;
-  inputValues.reserve(inputs.size());
-  outputValues.reserve(outputs.size());
-  std::copy(inputs.begin(), inputs.end(), std::back_inserter(inputValues));
-  std::copy(outputs.begin(), outputs.end(), std::back_inserter(outputValues));
-
-  auto iteratorStrTypes =
-      llvm::to_vector<8>(llvm::map_range(iteratorTypes, toString));
-  // clang-format off
-  auto *op =
-      edsc::ScopedContext::getBuilderRef()
-          .create<linalg::GenericOp>(
-              edsc::ScopedContext::getLocation(),
-              resultTensorTypes,
-              inputValues,
-              outputValues,
-              maps,
-              iteratorStrTypes,
-              ""/*doc*/,
-              ""/*library_call*/)
-          .getOperation();
-  // clang-format on
-
-  using namespace edsc;
-  SmallVector<Type, 4> blockTypes;
-  blockTypes.reserve(inputs.size() + outputs.size());
-  for (auto container : {inputs, outputs})
-    for (const StructuredIndexed &s : container)
-      blockTypes.push_back(getElementTypeOrSelf(s.getType()));
-
-  assert(op->getNumRegions() == 1);
-  assert(op->getRegion(0).empty());
-  OpBuilder opBuilder(op);
-  ScopedContext scope(opBuilder, op->getLoc());
-  buildInNewBlock(op->getRegion(0), blockTypes, regionBuilder);
-  assert(llvm::hasSingleElement(op->getRegion(0)));
-  return op;
-}
-
-void mlir::edsc::ops::mulRegionBuilder(ValueRange args) {
-  using edsc::op::operator+;
-  using edsc::op::operator*;
-  assert(args.size() == 2 && "expected 2 block arguments");
-  Value a(args[0]), b(args[1]);
-  linalg_yield(a * b);
-}
-
-void mlir::edsc::ops::macRegionBuilder(ValueRange args) {
-  using edsc::op::operator+;
-  using edsc::op::operator*;
-  assert(args.size() == 3 && "expected 3 block arguments");
-  Value a(args[0]), b(args[1]), c(args[2]);
-  linalg_yield(c + a * b);
-}
-
-Operation *mlir::edsc::ops::linalg_generic_pointwise(
-    UnaryPointwiseOpBuilder unaryOp, StructuredIndexed I, StructuredIndexed O) {
-  SmallVector<IteratorType, 4> iterTypes(O.getExprs().size(),
-                                         IteratorType::Parallel);
-  auto fun = [&unaryOp](ValueRange args) {
-    assert(!args.empty() && "expected >= 1 block arguments");
-    Value a(args[0]);
-    linalg_yield(unaryOp(a));
-  };
-  if (O.getType().isa<RankedTensorType>())
-    return makeGenericLinalgOp(iterTypes, /*inputs=*/{I}, /*outputs=*/{O},
-                               /*resultTensorTypes=*/{O}, fun);
-  return makeGenericLinalgOp(iterTypes, /*inputs=*/{I}, /*outputs=*/{O},
-                             /*resultTensorTypes=*/{}, fun);
-}
-
-Operation *mlir::edsc::ops::linalg_generic_pointwise_tanh(StructuredIndexed I,
-                                                          StructuredIndexed O) {
-  UnaryPointwiseOpBuilder unOp([](Value a) -> Value { return math_tanh(a); });
-  return linalg_generic_pointwise(unOp, I, O);
-}
-
-/// Binary pointwise operation (with broadcast) entry point.
-Operation *mlir::edsc::ops::linalg_generic_pointwise(
-    BinaryPointwiseOpBuilder binaryOp, StructuredIndexed I1,
-    StructuredIndexed I2, StructuredIndexed O) {
-  SmallVector<IteratorType, 4> iterTypes(O.getExprs().size(),
-                                         IteratorType::Parallel);
-  auto fun = [&binaryOp](ValueRange args) {
-    assert(args.size() >= 2 && "expected >= 2 block arguments");
-    Value a(args[0]), b(args[1]);
-    linalg_yield(binaryOp(a, b));
-  };
-  if (O.getType().isa<RankedTensorType>())
-    return makeGenericLinalgOp(iterTypes, /*inputs=*/{I1, I2}, /*outputs=*/{O},
-                               /*resultTensorTypes=*/{O}, fun);
-  return makeGenericLinalgOp(iterTypes, /*inputs=*/{I1, I2},
-                             /*outputs=*/{O}, /*resultTensorTypes=*/{}, fun);
-}
-
-Operation *mlir::edsc::ops::linalg_generic_pointwise_add(StructuredIndexed I1,
-                                                         StructuredIndexed I2,
-                                                         StructuredIndexed O) {
-  using edsc::op::operator+;
-  BinaryPointwiseOpBuilder binOp(
-      [](Value a, Value b) -> Value { return a + b; });
-  return linalg_generic_pointwise(binOp, I1, I2, O);
-}
-
-Operation *mlir::edsc::ops::linalg_generic_pointwise_max(StructuredIndexed I1,
-                                                         StructuredIndexed I2,
-                                                         StructuredIndexed O) {
-  BinaryPointwiseOpBuilder binOp([](Value a, Value b) -> Value {
-    using edsc::op::sgt;
-    return std_select(sgt(a, b), a, b);
-  });
-  return linalg_generic_pointwise(binOp, I1, I2, O);
-}
-
-Operation *
-mlir::edsc::ops::linalg_generic_matmul(Value vA, Value vB, Value vC,
-                                       MatmulRegionBuilder regionBuilder) {
-  // clang-format off
-  AffineExpr m, n, k;
-  bindDims(ScopedContext::getContext(), m, n, k);
-  StructuredIndexed A(vA), B(vB), C(vC);
-  return makeGenericLinalgOp(
-    {IteratorType::Parallel, IteratorType::Parallel, IteratorType::Reduction},
-    /*inputs=*/{A({m, k}), B({k, n})},
-    /*outputs=*/{C({m, n})},
-    /*resultTensorTypes=*/{},
-    regionBuilder);
-  // clang-format on
-}
-
-Operation *
-mlir::edsc::ops::linalg_generic_matmul(Value vA, Value vB, Value vC,
-                                       RankedTensorType tD,
-                                       MatmulRegionBuilder regionBuilder) {
-  // clang-format off
-  AffineExpr m, n, k;
-  bindDims(ScopedContext::getContext(), m, n, k);
-  StructuredIndexed A(vA), B(vB), C(vC), D(tD);
-  return makeGenericLinalgOp(
-    {IteratorType::Parallel, IteratorType::Parallel, IteratorType::Reduction},
-    /*inputs=*/{A({m, k}), B({k, n})},
-    /*outputs=*/{C({m, n})},
-    /*resultTensorTypes=*/{D({m, n})},
-    regionBuilder);
-  // clang-format on
-}
-
-Operation *mlir::edsc::ops::linalg_generic_conv_nhwc(Value vI, Value vW,
-                                                     Value vO,
-                                                     ArrayRef<int> strides,
-                                                     ArrayRef<int> dilations) {
-  MLIRContext *ctx = ScopedContext::getContext();
-  // TODO: some template magic to make everything rank-polymorphic.
-  assert((dilations.empty() || dilations.size() == 2) && "only 2-D conv atm");
-  assert((strides.empty() || strides.size() == 2) && "only 2-D conv atm");
-
-  // Some short names.
-  auto par = IteratorType::Parallel;
-  auto red = IteratorType::Reduction;
-  auto s = strides;
-  auto d = dilations;
-
-  AffineExpr b, f, h, w, kh, kw, c;
-  bindDims(ctx, b, f, h, w, kh, kw, c);
-  unsigned numDims = c.cast<AffineDimExpr>().getPosition() + 1;
-  StructuredIndexed I(vI), W(vW), O(vO);
-  // clang-format off
-  return makeGenericLinalgOp(
-    {par, par, par, par, red, red, red},
-    /*inputs=*/{
-      I({b,
-         // Roundtrip to flattened form to serve as canonicalization and ensure
-         // consistent ordering of subexpressions.
-         simplifyAffineExpr(s[0] * h + d[0] * kh, numDims, 0),
-         simplifyAffineExpr(s[1] * w + d[1] * kw, numDims, 0),
-         c}),
-      W({kh, kw, c, f}) },
-    /*outputs=*/{ O({b, h, w, f}) },
-    /*resultTensorTypes=*/{},
-    macRegionBuilder);
-  // clang-format on
-}
-
-Operation *mlir::edsc::ops::linalg_generic_dilated_conv_nhwc(
-    Value vI, Value vW, Value vO, int depth_multiplier, ArrayRef<int> strides,
-    ArrayRef<int> dilations) {
-  MLIRContext *ctx = ScopedContext::getContext();
-  // TODO: some template magic to make everything rank-polymorphic.
-  assert((dilations.empty() || dilations.size() == 2) && "only 2-D conv atm");
-  assert((strides.empty() || strides.size() == 2) && "only 2-D conv atm");
-
-  // Some short names.
-  auto par = IteratorType::Parallel;
-  auto red = IteratorType::Reduction;
-  auto s = strides;
-  auto d = dilations;
-
-  // clang-format off
-  AffineExpr b, dm, c, h, w, kh, kw;
-  bindDims(ctx, b, dm, c, h, w, kh, kw);
-  unsigned numDims = kw.cast<AffineDimExpr>().getPosition() + 1;
-  StructuredIndexed I(vI), W(vW), O(vO);
-  return makeGenericLinalgOp(
-    {par, par, par, par, par, red, red},
-    /*inputs=*/{
-      I({b,
-         // Roundtrip to flattened form to serve as canonicalization and ensure
-         // consistent ordering of subexpressions.
-         simplifyAffineExpr(s[0] * h + d[0] * kh, numDims, 0),
-         simplifyAffineExpr(s[1] * w + d[1] * kw, numDims, 0),
-         c}),
-      W({kh, kw, c, dm})},
-    /*outputs=*/{
-      O({b, h, w, simplifyAffineExpr(c * depth_multiplier + dm, numDims, 0)})},
-    /*resultTensorTypes=*/{},
-    macRegionBuilder);
-  // clang-format on
-}

diff --git a/mlir/lib/Dialect/Linalg/EDSC/CMakeLists.txt b/mlir/lib/Dialect/Linalg/EDSC/CMakeLists.txt
deleted file mode 100644
index c93e7582a876..000000000000
--- a/mlir/lib/Dialect/Linalg/EDSC/CMakeLists.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-add_mlir_dialect_library(MLIRLinalgEDSC
-  Builders.cpp
-
-  ADDITIONAL_HEADER_DIRS
-  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/Linalg
-
-  LINK_LIBS PUBLIC
-  MLIREDSC
-  MLIRIR
-  MLIRAffine
-  MLIRAffineEDSC
-  MLIRLinalg
-  MLIRMath
-  MLIRMemRef
-  MLIRSCF
-  MLIRStandard
-  )

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index ee2136a8f110..f3de81801e67 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -13,7 +13,6 @@
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
-#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
@@ -340,10 +339,10 @@ class RegionBuilderHelper {
 //===----------------------------------------------------------------------===//
 // CopyOp
 //===----------------------------------------------------------------------===//
-void CopyOp::regionBuilder(Block &block, ValueRange captures) {
-  using namespace edsc::intrinsics;
+void CopyOp::regionBuilder(ImplicitLocOpBuilder &b, Block &block,
+                           ValueRange captures) {
   assert(block.getNumArguments() == 2 && "CopyOp regionBuilder expects 2 args");
-  (linalg_yield(block.getArgument(0)));
+  b.create<linalg::YieldOp>(block.getArgument(0));
 }
 
 void CopyOp::build(OpBuilder &builder, OperationState &result, Value input,
@@ -420,10 +419,10 @@ void CopyOp::getEffects(
 //===----------------------------------------------------------------------===//
 // FillOp
 //===----------------------------------------------------------------------===//
-void FillOp::regionBuilder(Block &block, ValueRange captures) {
-  using namespace edsc::intrinsics;
+void FillOp::regionBuilder(ImplicitLocOpBuilder &b, Block &block,
+                           ValueRange captures) {
   assert(captures.size() == 1 && "FillOp regionBuilder expects 1 capture");
-  (linalg_yield(captures));
+  b.create<linalg::YieldOp>(captures);
 }
 
 void FillOp::build(OpBuilder &builder, OperationState &result, Value output,
@@ -2769,8 +2768,8 @@ fillStructuredOpRegion(OpBuilder &opBuilder, Region &region,
   }
 
   opBuilder.setInsertionPointToStart(body);
-  mlir::edsc::ScopedContext scope(opBuilder, opBuilder.getUnknownLoc());
-  NamedStructuredOpType::regionBuilder(*body, captures);
+  ImplicitLocOpBuilder b(opBuilder.getUnknownLoc(), opBuilder);
+  NamedStructuredOpType::regionBuilder(b, *body, captures);
 
   // indexing_maps is an auto-generated method.
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
index e52bb39815c7..b073bf0079f8 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
@@ -27,11 +27,9 @@ add_mlir_dialect_library(MLIRLinalgTransforms
   MLIRAffineUtils
   MLIRAnalysis
   MLIRComplex
-  MLIREDSC
   MLIRIR
   MLIRMemRef
   MLIRLinalgAnalysis
-  MLIRLinalgEDSC
   MLIRLinalg
   MLIRLinalgUtils
   MLIRSCF

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
index 8176888f0beb..e5b3fd29f23b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
@@ -19,6 +19,7 @@
 #include "mlir/IR/AffineMap.h"
 #include "mlir/IR/Attributes.h"
 #include "mlir/IR/Builders.h"
+#include "mlir/IR/ImplicitLocOpBuilder.h"
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 #include "llvm/ADT/SmallVector.h"
@@ -60,8 +61,9 @@ static GenericOp createGenericOpFromNamedOp(LinalgOp namedOp,
       namedOp.getLoc(), types, namedOp.getInputs(), namedOp.getOutputs(),
       indexingMaps, iterators,
       [&regionBuilder](OpBuilder &bodyBuilder, Location loc, ValueRange) {
-        edsc::ScopedContext scope(bodyBuilder, loc);
-        regionBuilder(*bodyBuilder.getBlock(), /*captures=*/{});
+        ImplicitLocOpBuilder b(loc, bodyBuilder);
+        regionBuilder(b, *bodyBuilder.getBlock(),
+                      /*captures=*/{});
       });
 }
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 714ac2fe3b41..bbf7d2dd20f1 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -11,17 +11,13 @@
 //===----------------------------------------------------------------------===//
 
 #include "PassDetail.h"
-#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Complex/IR/Complex.h"
-#include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
 #include "mlir/Dialect/Linalg/Passes.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
-#include "mlir/Dialect/MemRef/EDSC/Intrinsics.h"
 #include "mlir/Dialect/SCF/SCF.h"
-#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineExprVisitor.h"
 #include "mlir/IR/AffineMap.h"
@@ -29,11 +25,10 @@
 #include "mlir/Support/LLVM.h"
 #include "mlir/Transforms/FoldUtils.h"
 #include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/TypeSwitch.h"
 #include "llvm/Support/CommandLine.h"
 
 using namespace mlir;
-using namespace mlir::edsc;
-using namespace mlir::edsc::intrinsics;
 using namespace mlir::linalg;
 using namespace mlir::scf;
 
@@ -80,10 +75,11 @@ static Value allocBuffer(ImplicitLocOpBuilder &b,
 /// no call back to do so is provided. The default is to allocate a
 /// memref<..xi8> and return a view to get a memref type of shape
 /// boundingSubViewSize.
-static Optional<Value> defaultAllocBufferCallBack(
-    const LinalgPromotionOptions &options, OpBuilder &builder,
-    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
-    bool dynamicBuffers, Optional<unsigned> alignment, DataLayout &layout) {
+static Optional<Value>
+defaultAllocBufferCallBack(const LinalgPromotionOptions &options,
+                           OpBuilder &builder, memref::SubViewOp subView,
+                           ArrayRef<Value> boundingSubViewSize,
+                           Optional<unsigned> alignment, DataLayout &layout) {
   ShapedType viewType = subView.getType();
   ImplicitLocOpBuilder b(subView.getLoc(), builder);
   auto zero = b.createOrFold<ConstantIndexOp>(0);
@@ -108,10 +104,10 @@ static Optional<Value> defaultAllocBufferCallBack(
 static LogicalResult
 defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                              OpBuilder &b, Value fullLocalView) {
-  auto viewOp = fullLocalView.getDefiningOp<memref::ViewOp>();
-  assert(viewOp && "expected full local view to be a ViewOp");
-  if (!options.useAlloca)
-    memref_dealloc(viewOp.source());
+  if (!options.useAlloca) {
+    auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
+    b.create<memref::DeallocOp>(viewOp.source().getLoc(), viewOp.source());
+  }
   return success();
 }
 
@@ -125,7 +121,7 @@ struct LinalgOpInstancePromotionOptions {
   LinalgOpInstancePromotionOptions(LinalgOp op,
                                    const LinalgPromotionOptions &options);
   /// SubViews to promote.
-  MapVector<unsigned, Value> subViews;
+  MapVector<int64_t, Value> subViews;
   /// True if the full view should be used for the promoted buffer.
   DenseMap<Value, bool> useFullTileBuffers;
 
@@ -138,6 +134,7 @@ struct LinalgOpInstancePromotionOptions {
 
   /// Allow the use of dynamically-sized buffers.
   bool dynamicBuffers;
+
   /// Alignment of promoted buffer.
   Optional<unsigned> alignment;
 };
@@ -148,12 +145,12 @@ LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
     : subViews(), dynamicBuffers(options.dynamicBuffers),
       alignment(options.alignment) {
   assert(linalgOp.hasBufferSemantics() && "revisit usage of shaped operand");
-  unsigned nBuffers = linalgOp.getNumShapedOperands();
+  int64_t nBuffers = linalgOp.getNumShapedOperands();
   auto vUseFullTileBuffers =
       options.useFullTileBuffers.getValueOr(llvm::SmallBitVector());
   vUseFullTileBuffers.resize(nBuffers, options.useFullTileBuffersDefault);
 
-  for (unsigned idx = 0; idx != nBuffers; ++idx) {
+  for (int64_t idx = 0; idx != nBuffers; ++idx) {
     if (options.operandsToPromote && !options.operandsToPromote->count(idx))
       continue;
     auto *op = linalgOp.getShapedOperand(idx).getDefiningOp();
@@ -163,24 +160,30 @@ LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
     }
   }
 
-  allocationFn = (options.allocationFn
-                      ? *(options.allocationFn)
-                      : [&](OpBuilder &builder, memref::SubViewOp subViewOp,
-                            ArrayRef<Value> boundingSubViewSize,
-                            DataLayout &layout) -> Optional<Value> {
-    return defaultAllocBufferCallBack(options, builder, subViewOp,
-                                      boundingSubViewSize, dynamicBuffers,
-                                      alignment, layout);
-  });
-  deallocationFn =
-      (options.deallocationFn
-           ? *(options.deallocationFn)
-           : [&](OpBuilder &b, Value buffer) {
-               return defaultDeallocBufferCallBack(options, b, buffer);
-             });
-  auto defaultCopyCallBack = [&](OpBuilder &builder, Value src,
-                                 Value dst) -> LogicalResult {
-    linalg_copy(src, dst);
+  if (options.allocationFn) {
+    allocationFn = *options.allocationFn;
+  } else {
+    allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
+                       ArrayRef<Value> boundingSubViewSize,
+                       DataLayout &layout) -> Optional<Value> {
+      return defaultAllocBufferCallBack(options, b, subViewOp,
+                                        boundingSubViewSize, alignment, layout);
+    };
+  }
+
+  if (options.deallocationFn) {
+    deallocationFn = *options.deallocationFn;
+  } else {
+    deallocationFn = [&](OpBuilder &b, Value buffer) {
+      return defaultDeallocBufferCallBack(options, b, buffer);
+    };
+  }
+
+  // Save the loc because `linalgOp` goes out of scope.
+  Location loc = linalgOp.getLoc();
+  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
+                                   Value dst) -> LogicalResult {
+    b.create<linalg::CopyOp>(loc, src, dst);
     return success();
   };
   copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
@@ -207,7 +210,6 @@ LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
 Optional<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
     OpBuilder &b, Location loc, memref::SubViewOp subView,
     AllocBufferCallbackFn allocationFn, DataLayout &layout) {
-  ScopedContext scopedContext(b, loc);
   auto viewType = subView.getType();
   auto rank = viewType.getRank();
   SmallVector<Value, 4> fullSizes;
@@ -223,7 +225,8 @@ Optional<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
         (!sizeAttr) ? rangeValue.size : b.create<ConstantOp>(loc, sizeAttr);
     LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
     fullSizes.push_back(size);
-    partialSizes.push_back(memref_dim(subView, en.index()).value);
+    partialSizes.push_back(
+        b.createOrFold<memref::DimOp>(loc, subView, en.index()));
   }
   SmallVector<int64_t, 4> dynSizes(fullSizes.size(), -1);
   // If a callback is not specified, then use the default implementation for
@@ -238,20 +241,19 @@ Optional<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
   return PromotionInfo{*fullLocalView, partialLocalView};
 }
 
-static Optional<MapVector<unsigned, PromotionInfo>>
-promoteSubViews(OpBuilder &b, Location loc,
+static Optional<MapVector<int64_t, PromotionInfo>>
+promoteSubViews(ImplicitLocOpBuilder &b,
                 LinalgOpInstancePromotionOptions options, DataLayout &layout) {
   if (options.subViews.empty())
     return {};
 
-  ScopedContext scope(b, loc);
-  MapVector<unsigned, PromotionInfo> promotionInfoMap;
+  MapVector<int64_t, PromotionInfo> promotionInfoMap;
 
   for (auto v : options.subViews) {
     memref::SubViewOp subView =
         cast<memref::SubViewOp>(v.second.getDefiningOp());
     Optional<PromotionInfo> promotionInfo = promoteSubviewAsNewBuffer(
-        b, loc, subView, options.allocationFn, layout);
+        b, b.getLoc(), subView, options.allocationFn, layout);
     if (!promotionInfo)
       return {};
     promotionInfoMap[v.first] = *promotionInfo;
@@ -259,23 +261,27 @@ promoteSubViews(OpBuilder &b, Location loc,
     // Only fill the buffer if the full local view is used
     if (!options.useFullTileBuffers[v.second])
       continue;
-    Value fillVal;
-    if (auto t = subView.getType().getElementType().dyn_cast<FloatType>()) {
-      fillVal = std_constant(FloatAttr::get(t, 0.0));
-    } else if (auto t =
-                   subView.getType().getElementType().dyn_cast<IntegerType>()) {
-      fillVal = std_constant_int(0, t);
-    } else if (auto t =
-                   subView.getType().getElementType().dyn_cast<ComplexType>()) {
-      if (auto et = t.getElementType().dyn_cast<FloatType>())
-        fillVal = std_constant(FloatAttr::get(et, 0.0));
-      else if (auto et = t.getElementType().cast<IntegerType>())
-        fillVal = std_constant_int(0, et);
-      fillVal = b.create<complex::CreateOp>(loc, t, fillVal, fillVal);
-    } else {
+    Type subviewEltType = subView.getType().getElementType();
+    Value fillVal =
+        llvm::TypeSwitch<Type, Value>(subviewEltType)
+            .Case([&](FloatType t) {
+              return b.create<ConstantOp>(FloatAttr::get(t, 0.0));
+            })
+            .Case([&](IntegerType t) {
+              return b.create<ConstantOp>(IntegerAttr::get(t, 0));
+            })
+            .Case([&](ComplexType t) {
+              Value tmp;
+              if (auto et = t.getElementType().dyn_cast<FloatType>())
+                tmp = b.create<ConstantOp>(FloatAttr::get(et, 0.0));
+              else if (auto et = t.getElementType().cast<IntegerType>())
+                tmp = b.create<ConstantOp>(IntegerAttr::get(et, 0));
+              return b.create<complex::CreateOp>(t, tmp, tmp);
+            })
+            .Default([](auto) { return Value(); });
+    if (!fillVal)
       return {};
-    }
-    linalg_fill(promotionInfo->fullLocalView, fillVal);
+    b.create<linalg::FillOp>(promotionInfo->fullLocalView, fillVal);
   }
 
   // Copy data into the promoted buffers. Use callback if provided.
@@ -292,7 +298,7 @@ promoteSubViews(OpBuilder &b, Location loc,
 }
 
 static Optional<LinalgOp>
-promoteSubViews(OpBuilder &b, LinalgOp op,
+promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                 LinalgOpInstancePromotionOptions options, DataLayout &layout) {
   assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");
 
@@ -303,8 +309,7 @@ promoteSubViews(OpBuilder &b, LinalgOp op,
   }
 
   // 1. Promote the specified views and use them in the new op.
-  auto loc = op.getLoc();
-  auto promotedBuffersAndViews = promoteSubViews(b, loc, options, layout);
+  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
   if (!promotedBuffersAndViews ||
       promotedBuffersAndViews->size() != options.subViews.size())
     return {};
@@ -336,7 +341,6 @@ promoteSubViews(OpBuilder &b, LinalgOp op,
 
   OpBuilder::InsertionGuard guard(b);
   b.setInsertionPointAfter(op);
-  ScopedContext scope(b, loc);
   // 3. Emit write-back for the promoted output views: copy the partial view.
   for (auto viewAndPartialLocalView : writebackViews) {
     if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
@@ -372,10 +376,11 @@ mlir::linalg::promoteSubviewsPrecondition(Operation *op,
 }
 
 Optional<LinalgOp>
-mlir::linalg::promoteSubViews(OpBuilder &b, LinalgOp linalgOp,
+mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
                               LinalgPromotionOptions options) {
   LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
   auto layout = DataLayout::closest(linalgOp);
+  ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
   return ::promoteSubViews(b, linalgOp, linalgOptions, layout);
 }
 
@@ -388,14 +393,14 @@ struct LinalgPromotionPass : public LinalgPromotionBase<LinalgPromotionPass> {
   }
 
   void runOnFunction() override {
-    getFunction().walk([this](LinalgOp op) {
+    getFunction().walk([&](LinalgOp op) {
       auto options = LinalgPromotionOptions()
                          .setDynamicBuffers(dynamicBuffers)
                          .setUseAlloca(useAlloca);
       if (failed(promoteSubviewsPrecondition(op, options)))
         return;
       LLVM_DEBUG(llvm::dbgs() << "Promote: " << *(op.getOperation()) << "\n");
-      OpBuilder b(op);
+      ImplicitLocOpBuilder b(op.getLoc(), op);
       promoteSubViews(b, op, options);
     });
   }
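The llvm::TypeSwitch in the fill-value hunk above replaces a chain of dyn_cast branches: the first Case whose lambda parameter type matches the switched Type runs, and Default catches everything else. A condensed sketch of that dispatch, assuming an ImplicitLocOpBuilder is available (the helper name buildZero is illustrative, not part of the patch):

  #include "llvm/ADT/TypeSwitch.h"

  static Value buildZero(ImplicitLocOpBuilder &b, Type t) {
    return llvm::TypeSwitch<Type, Value>(t)
        .Case([&](FloatType f) {
          return b.create<ConstantOp>(FloatAttr::get(f, 0.0));
        })
        .Case([&](IntegerType i) {
          return b.create<ConstantOp>(IntegerAttr::get(i, 0));
        })
        .Default([](Type) { return Value(); }); // null Value: unsupported type
  }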

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 9450d581edec..f2420fa4cbfe 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -12,7 +12,6 @@
 
 #include "PassDetail.h"
 #include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
-#include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
 #include "mlir/Dialect/Linalg/Passes.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"

diff --git a/mlir/lib/Dialect/Linalg/Utils/CMakeLists.txt b/mlir/lib/Dialect/Linalg/Utils/CMakeLists.txt
index 0d092ddae56a..51e4f79077d9 100644
--- a/mlir/lib/Dialect/Linalg/Utils/CMakeLists.txt
+++ b/mlir/lib/Dialect/Linalg/Utils/CMakeLists.txt
@@ -6,9 +6,8 @@ add_mlir_dialect_library(MLIRLinalgUtils
 
   LINK_LIBS PUBLIC
   MLIRAffine
-  MLIREDSC
+  MLIRAffineEDSC
   MLIRIR
-  MLIRLinalgEDSC
   MLIRLinalg
   MLIRSCF
   MLIRPass

diff --git a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-gen.tc b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-gen.tc
index 8c4c31da2fce..84adc8b260c4 100644
--- a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-gen.tc
+++ b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-gen.tc
@@ -23,15 +23,16 @@
 //  IMPL-NEXT: map2 = simplifyAffineMap(map2);
 //  IMPL-NEXT: return {{.+}}.getAffineMapArrayAttr({ map0, map1, map2 });
 //
-//       IMPL:  void Test1Op::regionBuilder(Block &block, ValueRange captures) {
+//       IMPL:  void Test1Op::regionBuilder(ImplicitLocOpBuilder &b,
+//       IMPL:    Block &block, ValueRange captures) {
 //       IMPL:  Value [[a:.*]](args[0]), [[b:.*]](args[1]), [[c:.*]](args[2]);
-//       IMPL:  Value [[d:.*]] = std_mulf([[a]], [[b]]);
-//       IMPL:  Value [[e:.*]] = std_addf([[c]], [[d]]);
-//       IMPL:  (linalg_yield(ValueRange{ [[e]] }));
+//       IMPL:  Value [[d:.*]] = b.create<MulFOp>([[a]], [[b]]);
+//       IMPL:  Value [[e:.*]] = b.create<AddFOp>([[c]], [[d]]);
+//       IMPL:  b.create<linalg::YieldOp>(ValueRange{ [[e]] });
 //
 ods_def<Test1Op> :
 def test1(A: f32(M, K), B: f32(K)) -> (C: f32(M)) {
-  C(m) = std_addf<k>(C(m), std_mulf(A(m, k), B(k)));
+  C(m) = AddFOp<k>(C(m), MulFOp(A(m, k), B(k)));
 }
 
 // ODS-LABEL: def Test2Op : LinalgStructuredBase_Op<"test2", [
@@ -47,15 +48,16 @@ def test1(A: f32(M, K), B: f32(K)) -> (C: f32(M)) {
 //       IMPL:  AffineMap::get(3, 3, {d2, d1}, context)
 //       IMPL:  AffineMap::get(3, 3, {d0, d1}, context)
 //
-//       IMPL:  Test2Op::regionBuilder(Block &block, ValueRange captures) {
+//       IMPL:  Test2Op::regionBuilder(ImplicitLocOpBuilder &b,
+//       IMPL:    Block &block, ValueRange captures) {
 //       IMPL:  Value [[a:.*]](args[0]), [[b:.*]](args[1]), [[c:.*]](args[2]);
-//       IMPL:  Value [[d:.*]] = std_mulf([[a]], [[b]]);
-//       IMPL:  Value [[e:.*]] = std_addf([[c]], [[d]]);
-//       IMPL:  (linalg_yield(ValueRange{ [[e]] }));
+//       IMPL:  Value [[d:.*]] = b.create<MulFOp>([[a]], [[b]]);
+//       IMPL:  Value [[e:.*]] = b.create<AddFOp>([[c]], [[d]]);
+//       IMPL:  b.create<linalg::YieldOp>(ValueRange{ [[e]] });
 //
 ods_def<Test2Op> :
 def test2(A: f32(M, K), B: f32(K, N)) -> (C: f32(M, N)) {
-  C(m, n) = std_addf<k>(C(m, n), std_mulf(A(m, k), B(k, n)));
+  C(m, n) = AddFOp<k>(C(m, n), MulFOp(A(m, k), B(k, n)));
 }
 
 // ODS-LABEL: def Test3Op : LinalgStructuredBase_Op<"test3", [
@@ -71,15 +73,16 @@ def test2(A: f32(M, K), B: f32(K, N)) -> (C: f32(M, N)) {
 //       IMPL:  AffineMap::get(4, 4, {d3, d2}, context)
 //       IMPL:  AffineMap::get(4, 4, {d0, d1, d2}, context)
 //
-//       IMPL:  Test3Op::regionBuilder(Block &block, ValueRange captures) {
+//       IMPL:  Test3Op::regionBuilder(ImplicitLocOpBuilder &b,
+//       IMPL:    Block &block, ValueRange captures) {
 //       IMPL:  Value [[a:.*]](args[0]), [[b:.*]](args[1]), [[c:.*]](args[2]);
-//       IMPL:  Value [[d:.*]] = std_mulf([[a]], [[b]]);
-//       IMPL:  Value [[e:.*]] = std_addf([[c]], [[d]]);
-//       IMPL:  (linalg_yield(ValueRange{ [[e]] }));
+//       IMPL:  Value [[d:.*]] = b.create<MulFOp>([[a]], [[b]]);
+//       IMPL:  Value [[e:.*]] = b.create<AddFOp>([[c]], [[d]]);
+//       IMPL:  b.create<linalg::YieldOp>(ValueRange{ [[e]] });
 //
 ods_def<Test3Op> :
 def test3(A: f32(Batch, M, K), B: f32(K, N)) -> (C: f32(Batch, M, N)) {
-  C(b, m, n) = std_addf<k>(C(b, m, n), std_mulf(A(b, m, k), B(k, n)));
+  C(b, m, n) = AddFOp<k>(C(b, m, n), MulFOp(A(b, m, k), B(k, n)));
 }
 
 // Test attribute definitions
@@ -115,7 +118,7 @@ attr(
   array_attr : f32[],
   optional_attr? : f32
 ) {
-  C(b, m, n) = std_addf<k>(C(b, m, n), std_mulf(A(b, m, k), B(k, n)));
+  C(b, m, n) = AddFOp<k>(C(b, m, n), MulFOp(A(b, m, k), B(k, n)));
 }
 
 // Test attribute usage in affine expressions
@@ -136,8 +139,8 @@ attr(
 ods_def<Test5Op>:
 def test5(I: f32(N, H, W, C), K: f32(F, KH, KW, C)) -> (O: f32(N, H, W, F))
      attr(strides: 2xi32) {
-  O(n, h, w, f) = std_addf<kh, kw>(
-      std_mulf(std_addf(I(n, h * strides[0] + kh, w * strides[1] + kw, c),
+  O(n, h, w, f) = AddFOp<kh, kw>(
+      MulFOp(AddFOp(I(n, h * strides[0] + kh, w * strides[1] + kw, c),
                         I(n, h * strides[0] + kh, w * strides[1] + kw, c)),
                K(f, kh, kw, c)));
 }
@@ -159,7 +162,7 @@ It has two inputs.
 It has one output.
 """
 {
-  C(m) = std_addf<k>(C(m), std_mulf(A(m, k), B(k)));
+  C(m) = AddFOp<k>(C(m), MulFOp(A(m, k), B(k)));
 }
 
 // Test attribute builder
@@ -174,19 +177,20 @@ ods_def<Test7Op>:
 def test7(A: f32(M, K), B: f32(K)) -> (C: f32(M))
      attr(attr_a: f32, attr_b: 4xi32)
 {
-  C(m) = std_addf<k>(C(m), std_mulf(A(m, k), B(k)));
+  C(m) = AddFOp<k>(C(m), MulFOp(A(m, k), B(k)));
 }
 
 // Test output arg order.
-// IMPL-LABEL:  void Test8Op::regionBuilder(Block &block, ValueRange captures) {
+// IMPL-LABEL:  void Test8Op::regionBuilder(ImplicitLocOpBuilder &b,
+//       IMPL:    Block &block, ValueRange captures) {
 //       IMPL:  Value [[a:.*]](args[0]), [[b:.*]](args[1]), [[c:.*]](args[2]);
-//       IMPL:  Value [[d:.*]] = std_mulf([[a]], [[b]]);
-//       IMPL:  Value [[e:.*]] = std_subf([[d]], [[c]]);
-//       IMPL:  (linalg_yield(ValueRange{ [[e]] }));
+//       IMPL:  Value [[d:.*]] = b.create<MulFOp>([[a]], [[b]]);
+//       IMPL:  Value [[e:.*]] = b.create<SubFOp>([[d]], [[c]]);
+//       IMPL:  b.create<linalg::YieldOp>(ValueRange{ [[e]] });
 ods_def<Test8Op>:
 def test8(A: f32(M, K), B: f32(K)) -> (C: f32(M))
 {
-  C(m) = std_subf<k>(std_mulf(A(m, k), B(k)), C(m));
+  C(m) = SubFOp<k>(MulFOp(A(m, k), B(k)), C(m));
 }
 
 // Test shape-only operand.
@@ -194,10 +198,11 @@ def test8(A: f32(M, K), B: f32(K)) -> (C: f32(M))
 //       IMPL:    auto map0 = AffineMap::get(2, 2, {d0, d1}, context);
 //       IMPL:    auto map1 = AffineMap::get(2, 2, {d1}, context);
 //       IMPL:    auto map2 = AffineMap::get(2, 2, {d0}, context);
-// IMPL-LABEL:  void Test9Op::regionBuilder(Block &block, ValueRange captures) {
+// IMPL-LABEL:  void Test9Op::regionBuilder(ImplicitLocOpBuilder &b,
+//       IMPL:    Block &block, ValueRange captures) {
 //       IMPL:  Value [[a:.*]](args[0]), [[c:.*]](args[2]);
 ods_def<Test9Op>:
 def test9(A: f32(M, K), B: f32(K)) -> (C: f32(M))
 {
-  C(m) = std_addf<k>(C(m), A(m, k));
+  C(m) = AddFOp<k>(C(m), A(m, k));
 }
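Reading the IMPL check lines for test1 together with the regionBuilder format string in mlir-linalg-ods-gen.cpp below, the generated body now looks roughly like this (a sketch assembled from the CHECK patterns; the _N value names are illustrative, not verbatim tool output):

  void Test1Op::regionBuilder(ImplicitLocOpBuilder &b,
                              Block &block, ValueRange captures) {
    auto args = block.getArguments();
    Value _0(args[0]), _1(args[1]), _2(args[2]); // A, B, C block arguments
    Value _3 = b.create<MulFOp>(_0, _1);         // A(m, k) * B(k)
    Value _4 = b.create<AddFOp>(_2, _3);         // C(m) + product
    b.create<linalg::YieldOp>(ValueRange{ _4 });
  }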

diff --git a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
index 72b7f6fe7dc9..5f107c307161 100644
--- a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
+++ b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
@@ -75,8 +75,8 @@ structured_op: !LinalgStructuredOpConfig
 #  ODS-NEXT:      TypeRange(inputs),
 #  ODS-NEXT:      TypeRange(outputs)
 
-# IMPL-LABEL:  void Test1Op::regionBuilder
-#  IMPL-SAME:  (Block &block, ValueRange captures)
+# IMPL-LABEL:  void Test1Op::regionBuilder(
+#       IMPL:    ImplicitLocOpBuilder &b, Block &block, ValueRange captures)
 #       IMPL:  Value [[VAL0:[a-z0-9]+]] = helper.constant("42 : i64");
 #   IMPL-DAG:  Value [[VAL1:[a-z0-9]+]] = helper.cast(block.getArgument(0).getType(), [[VAL0]]);
 #   IMPL-DAG:  Value [[VAL2:[a-z0-9]+]] = helper.index(1);
@@ -133,5 +133,6 @@ structured_op: !LinalgStructuredOpConfig
 #       IMPL:  "affine_map<(d0, d1)[s0, s1] -> (d1, d0)>"
 #       IMPL:  "affine_map<(d0, d1)[s0, s1] -> (d0, d1)>"
 
-#       IMPL:  void Test2Op::regionBuilder(Block &block, ValueRange captures)
+#       IMPL:  void Test2Op::regionBuilder(
+#       IMPL:    ImplicitLocOpBuilder &b, Block &block, ValueRange captures)
 #       IMPL:  yields.push_back(block.getArgument(0));

diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp
index 755c92fd7caa..71fb4364ac6a 100644
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp
@@ -72,6 +72,30 @@ using llvm::Twine;
 
 using namespace mlir;
 
+//===----------------------------------------------------------------------===//
+// Special "op aliases" substitutions.
+//===----------------------------------------------------------------------===//
+
+/// Perform substitutions of known special ops.
+/// This is a poor man's way of achieving "op aliases": i.e. giving an op a
+/// name.
+/// This is hacky and temporary until migration to the python opdsl is complete.
+static void substituteOpAliases(std::string &expressionsStr) {
+  for (auto kvp : SmallVector<std::pair<std::string, std::string>>{
+           {"b.create<CmpIOpSGT>(", "b.create<CmpIOp>(CmpIPredicate::sgt, "},
+           {"b.create<CmpFOpOGT>(", "b.create<CmpFOp>(CmpFPredicate::OGT, "},
+           {"b.create<CmpFOpOLT>(", "b.create<CmpFOp>(CmpFPredicate::OLT, "},
+           {"b.create<SignExtendIOp32>(",
+            "b.create<SignExtendIOp>(b.getI32Type(), "},
+       }) {
+    size_t pos = 0;
+    while ((pos = expressionsStr.find(kvp.first, pos)) != std::string::npos) {
+      expressionsStr.replace(pos, kvp.first.size(), kvp.second);
+      pos += kvp.second.size();
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Lexer
 //===----------------------------------------------------------------------===//
@@ -1941,8 +1965,10 @@ void TCParser::printODS(llvm::raw_ostream &os, StringRef cppOpName,
         // Auto-generated.
         ArrayAttr iterator_types();
         ArrayAttr indexing_maps();
-        static void regionBuilder(Block &block, ValueRange captures);
-        static std::function<void(Block &, ValueRange)> getRegionBuilder() {{
+        static void regionBuilder(ImplicitLocOpBuilder &b,
+                                  Block &block, ValueRange captures);
+        static std::function<void(ImplicitLocOpBuilder &b,
+                                  Block &, ValueRange)> getRegionBuilder() {{
           return regionBuilder;
         }
 
@@ -2325,7 +2351,7 @@ void TCParser::printRegionBuilder(llvm::raw_ostream &os, StringRef cppOpName,
                               printExpr(subExprsStringStream, *e);
                             });
       subExprsStringStream.flush();
-      const char *tensorExprFmt = "\n    Value _{0} = {1}({2});";
+      const char *tensorExprFmt = "\n    Value _{0} = b.create<{1}>({2});";
       os << llvm::formatv(tensorExprFmt, ++count, pTensorExpr->operationName,
                           subExprs);
       subExprsMap[pTensorExpr] = count;
@@ -2333,13 +2359,12 @@ void TCParser::printRegionBuilder(llvm::raw_ostream &os, StringRef cppOpName,
   };
 
   const char *regionBuilderFmt = R"FMT(
-  void {0}::regionBuilder(Block &block, ValueRange captures) {
-    using namespace edsc;
-    using namespace intrinsics;
+  void {0}::regionBuilder(ImplicitLocOpBuilder &b,
+                          Block &block, ValueRange captures) {
     auto args = block.getArguments();
     Value {1};
     {2}
-    (linalg_yield(ValueRange{ {3} }));
+    b.create<linalg::YieldOp>(ValueRange{ {3} });
   })FMT";
 
   std::string valueHandleStr;
@@ -2358,6 +2383,8 @@ void TCParser::printRegionBuilder(llvm::raw_ostream &os, StringRef cppOpName,
       if (e.kind == Expression::Kind::TensorExpr)
         printExpr(expressionStringStream, e);
     });
+  expressionStringStream.flush();
+  substituteOpAliases(expressionsStr);
 
   std::string yieldStr;
   llvm::raw_string_ostream yieldStringStream(yieldStr);
@@ -2367,7 +2394,6 @@ void TCParser::printRegionBuilder(llvm::raw_ostream &os, StringRef cppOpName,
                         });
 
   valueHandleStringStream.flush();
-  expressionStringStream.flush();
   yieldStringStream.flush();
 
   os << llvm::formatv(regionBuilderFmt, cppOpName, valueHandleStr,

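The substituteOpAliases helper added above is a plain find-and-replace over the generated expression text; advancing pos past each replacement keeps the scan linear and avoids rescanning substituted text. A hedged illustration of its effect (the surrounding expression string is made up; the alias table is the one in the patch):

  std::string expr = "Value _1 = b.create<CmpFOpOGT>(_a, _b);";
  substituteOpAliases(expr);
  // expr == "Value _1 = b.create<CmpFOp>(CmpFPredicate::OGT, _a, _b);"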
diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
index 90c237e4bf7c..318613965ea3 100644
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
@@ -509,8 +509,11 @@ def {0} : LinalgStructuredBase_Op<"{1}", !listconcat([
       // Auto-generated.
       ArrayAttr iterator_types();
       ArrayAttr indexing_maps();
-      static void regionBuilder(Block &block, ValueRange captures);
-      static std::function<void(Block &, ValueRange)> getRegionBuilder() {{
+      static void regionBuilder(
+        ImplicitLocOpBuilder &b, Block &block, ValueRange captures);
+      static std::function<
+        void(ImplicitLocOpBuilder &b, Block &, ValueRange)>
+      getRegionBuilder() {{
         return regionBuilder;
       }
 
@@ -755,7 +758,8 @@ std::string {0}::getLibraryCallName() {{
     // {1}: Number of args
     // {2}: Statements
     static const char structuredOpRegionBuilderFormat[] = R"FMT(
-void {0}::regionBuilder(Block &block, ValueRange captures) {{
+void {0}::regionBuilder(
+    ImplicitLocOpBuilder &b, Block &block, ValueRange captures) {{
   assert({1} > 0 && block.getNumArguments() == {1} &&
          "{0} regionBuilder expects {1} (>=0) args");
   RegionBuilderHelper helper(block.getArgument(0).getContext(), block);

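In these R"FMT(...)FMT" templates, llvm::formatv expands the {0}, {1}, ... placeholders and a literal '{' is written as '{{' (visible in the snippets above). A toy example of the mechanics, with made-up values:

  std::string s = llvm::formatv(
      "void {0}::regionBuilder(...) {{ /* expects {1} args */ }",
      "FooOp", 3);
  // s == "void FooOp::regionBuilder(...) { /* expects 3 args */ }"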

        

