[Mlir-commits] [mlir] 75394e1 - [mlir][EDSC] Almost NFC - Refactor and untangle EDSC dependencies

Nicolas Vasilache llvmlistbot at llvm.org
Mon Feb 10 09:13:46 PST 2020


Author: Nicolas Vasilache
Date: 2020-02-10T12:10:41-05:00
New Revision: 75394e1301dd1d4739a24d440e9996c52e5b5570

URL: https://github.com/llvm/llvm-project/commit/75394e1301dd1d4739a24d440e9996c52e5b5570
DIFF: https://github.com/llvm/llvm-project/commit/75394e1301dd1d4739a24d440e9996c52e5b5570.diff

LOG: [mlir][EDSC] Almost NFC - Refactor and untangle EDSC dependencies

This CL refactors EDSCs to layer them better and break unnecessary
dependencies. After this refactoring, the top-level EDSC target only
depends on IR but not on Dialects anymore and each dialect has its
own EDSC directory.

This simplifies the layering and breaks cyclic dependencies.
In particular, the declarative builder + folder are made explicit and
are now confined to Linalg.

As the refactoring occurred, certain classes and abstractions that were not
paying for themselves have been removed.

Differential Revision: https://reviews.llvm.org/D74302

Added: 
    mlir/include/mlir/Dialect/AffineOps/EDSC/Builders.h
    mlir/include/mlir/Dialect/AffineOps/EDSC/Intrinsics.h
    mlir/include/mlir/Dialect/LoopOps/EDSC/Builders.h
    mlir/include/mlir/Dialect/StandardOps/EDSC/Builders.h
    mlir/include/mlir/Dialect/StandardOps/EDSC/Intrinsics.h
    mlir/lib/Dialect/AffineOps/EDSC/Builders.cpp
    mlir/lib/Dialect/LoopOps/EDSC/Builders.cpp
    mlir/lib/Dialect/StandardOps/EDSC/Builders.cpp
    mlir/lib/Dialect/StandardOps/EDSC/Intrinsics.cpp

Modified: 
    mlir/docs/EDSC.md
    mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
    mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
    mlir/include/mlir/EDSC/Builders.h
    mlir/include/mlir/EDSC/Intrinsics.h
    mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
    mlir/lib/Conversion/VectorToLoops/ConvertVectorToLoops.cpp
    mlir/lib/Dialect/AffineOps/CMakeLists.txt
    mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
    mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
    mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
    mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
    mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
    mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
    mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
    mlir/lib/Dialect/Linalg/Utils/Utils.cpp
    mlir/lib/Dialect/LoopOps/CMakeLists.txt
    mlir/lib/Dialect/StandardOps/CMakeLists.txt
    mlir/lib/EDSC/Builders.cpp
    mlir/lib/EDSC/CMakeLists.txt
    mlir/test/EDSC/builder-api-test.cpp

Removed: 
    mlir/include/mlir/EDSC/Helpers.h
    mlir/lib/EDSC/Helpers.cpp
    mlir/lib/EDSC/Intrinsics.cpp


################################################################################
diff  --git a/mlir/docs/EDSC.md b/mlir/docs/EDSC.md
index 0b84d238358e..162e98e50542 100644
--- a/mlir/docs/EDSC.md
+++ b/mlir/docs/EDSC.md
@@ -21,7 +21,7 @@ IR snippets, as they are built, for programmatic manipulation. Intuitive
 operators are provided to allow concise and idiomatic expressions.
 
 ```c++
-ValueHandle zero = constant_index(0);
+ValueHandle zero = std_constant_index(0);
 IndexHandle i, j, k;
 ```
 
@@ -49,8 +49,8 @@ concise and structured loop nests.
               j(indexType),
               lb(f->getArgument(0)),
               ub(f->getArgument(1));
-  ValueHandle f7(constant_float(llvm::APFloat(7.0f), f32Type)),
-              f13(constant_float(llvm::APFloat(13.0f), f32Type)),
+  ValueHandle f7(std_constant_float(llvm::APFloat(7.0f), f32Type)),
+              f13(std_constant_float(llvm::APFloat(13.0f), f32Type)),
               i7(constant_int(7, 32)),
               i13(constant_int(13, 32));
   AffineLoopNestBuilder(&i, lb, ub, 3)([&]{

diff  --git a/mlir/include/mlir/Dialect/AffineOps/EDSC/Builders.h b/mlir/include/mlir/Dialect/AffineOps/EDSC/Builders.h
new file mode 100644
index 000000000000..c20c8d72ae96
--- /dev/null
+++ b/mlir/include/mlir/Dialect/AffineOps/EDSC/Builders.h
@@ -0,0 +1,141 @@
+//===- Builders.h - MLIR Declarative Builder Classes ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Provides intuitive composable interfaces for building structured MLIR
+// snippets in a declarative fashion.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_AFFINEOPS_EDSC_BUILDERS_H_
+#define MLIR_DIALECT_AFFINEOPS_EDSC_BUILDERS_H_
+
+#include "mlir/Dialect/AffineOps/AffineOps.h"
+#include "mlir/EDSC/Builders.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/Types.h"
+
+namespace mlir {
+namespace edsc {
+
+/// Constructs a new AffineForOp and captures the associated induction
+/// variable. A ValueHandle pointer is passed as the first argument and is the
+/// *only* way to capture the loop induction variable.
+LoopBuilder makeAffineLoopBuilder(ValueHandle *iv,
+                                  ArrayRef<ValueHandle> lbHandles,
+                                  ArrayRef<ValueHandle> ubHandles,
+                                  int64_t step);
+
+/// Explicit nested LoopBuilder. Offers a compressed multi-loop builder to avoid
+/// explicitly writing all the loops in a nest. This simple functionality is
+/// also useful to write rank-agnostic custom ops.
+///
+/// Usage:
+///
+/// ```c++
+///    AffineLoopNestBuilder({&i, &j, &k}, {lb, lb, lb}, {ub, ub, ub}, {1, 1,
+///    1})(
+///      [&](){
+///        ...
+///      });
+/// ```
+///
+/// ```c++
+///    AffineLoopNestBuilder({&i}, {lb}, {ub}, {1})([&](){
+///      AffineLoopNestBuilder({&j}, {lb}, {ub}, {1})([&](){
+///        AffineLoopNestBuilder({&k}, {lb}, {ub}, {1})([&](){
+///          ...
+///        }),
+///      }),
+///    });
+/// ```
+class AffineLoopNestBuilder {
+public:
+  /// This entry point accommodates the fact that AffineForOp implicitly uses
+  /// multiple `lbs` and `ubs` with one single `iv` and `step` to encode `max`
+/// and `min` constraints respectively.
+  AffineLoopNestBuilder(ValueHandle *iv, ArrayRef<ValueHandle> lbs,
+                        ArrayRef<ValueHandle> ubs, int64_t step);
+  AffineLoopNestBuilder(ArrayRef<ValueHandle *> ivs, ArrayRef<ValueHandle> lbs,
+                        ArrayRef<ValueHandle> ubs, ArrayRef<int64_t> steps);
+
+  void operator()(function_ref<void(void)> fun = nullptr);
+
+private:
+  SmallVector<LoopBuilder, 4> loops;
+};
+
+namespace op {
+
+ValueHandle operator+(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator-(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator*(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator/(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator%(ValueHandle lhs, ValueHandle rhs);
+ValueHandle floorDiv(ValueHandle lhs, ValueHandle rhs);
+ValueHandle ceilDiv(ValueHandle lhs, ValueHandle rhs);
+
+ValueHandle operator!(ValueHandle value);
+ValueHandle operator&&(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator||(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator^(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator==(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator!=(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator<(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator<=(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator>(ValueHandle lhs, ValueHandle rhs);
+ValueHandle operator>=(ValueHandle lhs, ValueHandle rhs);
+
+} // namespace op
+
+/// Operator overloadings.
+template <typename Load, typename Store>
+ValueHandle TemplatedIndexedValue<Load, Store>::operator+(ValueHandle e) {
+  using op::operator+;
+  return static_cast<ValueHandle>(*this) + e;
+}
+template <typename Load, typename Store>
+ValueHandle TemplatedIndexedValue<Load, Store>::operator-(ValueHandle e) {
+  using op::operator-;
+  return static_cast<ValueHandle>(*this) - e;
+}
+template <typename Load, typename Store>
+ValueHandle TemplatedIndexedValue<Load, Store>::operator*(ValueHandle e) {
+  using op::operator*;
+  return static_cast<ValueHandle>(*this) * e;
+}
+template <typename Load, typename Store>
+ValueHandle TemplatedIndexedValue<Load, Store>::operator/(ValueHandle e) {
+  using op::operator/;
+  return static_cast<ValueHandle>(*this) / e;
+}
+
+template <typename Load, typename Store>
+OperationHandle TemplatedIndexedValue<Load, Store>::operator+=(ValueHandle e) {
+  using op::operator+;
+  return Store(*this + e, getBase(), {indices.begin(), indices.end()});
+}
+template <typename Load, typename Store>
+OperationHandle TemplatedIndexedValue<Load, Store>::operator-=(ValueHandle e) {
+  using op::operator-;
+  return Store(*this - e, getBase(), {indices.begin(), indices.end()});
+}
+template <typename Load, typename Store>
+OperationHandle TemplatedIndexedValue<Load, Store>::operator*=(ValueHandle e) {
+  using op::operator*;
+  return Store(*this * e, getBase(), {indices.begin(), indices.end()});
+}
+template <typename Load, typename Store>
+OperationHandle TemplatedIndexedValue<Load, Store>::operator/=(ValueHandle e) {
+  using op::operator/;
+  return Store(*this / e, getBase(), {indices.begin(), indices.end()});
+}
+
+} // namespace edsc
+} // namespace mlir
+
+#endif // MLIR_DIALECT_AFFINEOPS_EDSC_BUILDERS_H_

diff  --git a/mlir/include/mlir/Dialect/AffineOps/EDSC/Intrinsics.h b/mlir/include/mlir/Dialect/AffineOps/EDSC/Intrinsics.h
new file mode 100644
index 000000000000..67d4ac16bb0b
--- /dev/null
+++ b/mlir/include/mlir/Dialect/AffineOps/EDSC/Intrinsics.h
@@ -0,0 +1,32 @@
+//===- Intrinsics.h - MLIR EDSC Intrinsics for AffineOps --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef MLIR_DIALECT_AFFINEOPS_EDSC_INTRINSICS_H_
+#define MLIR_DIALECT_AFFINEOPS_EDSC_INTRINSICS_H_
+
+#include "mlir/Dialect/AffineOps/EDSC/Builders.h"
+#include "mlir/EDSC/Intrinsics.h"
+
+namespace mlir {
+namespace edsc {
+namespace intrinsics {
+
+using affine_apply = ValueBuilder<AffineApplyOp>;
+using affine_if = OperationBuilder<AffineIfOp>;
+using affine_load = ValueBuilder<AffineLoadOp>;
+using affine_min = ValueBuilder<AffineMinOp>;
+using affine_max = ValueBuilder<AffineMaxOp>;
+using affine_store = OperationBuilder<AffineStoreOp>;
+
+/// Provide an index notation around affine_load and affine_store.
+using AffineIndexedValue = TemplatedIndexedValue<affine_load, affine_store>;
+
+} // namespace intrinsics
+} // namespace edsc
+} // namespace mlir
+
+#endif // MLIR_DIALECT_AFFINEOPS_EDSC_INTRINSICS_H_

diff  --git a/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h b/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
index cff93f13cd35..3bd83b433589 100644
--- a/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
+++ b/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
@@ -13,17 +13,24 @@
 #ifndef MLIR_DIALECT_LINALG_EDSC_BUILDERS_H_
 #define MLIR_DIALECT_LINALG_EDSC_BUILDERS_H_
 
-#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
+#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
+// TODO(ntv): Needed for SubViewOp::Range, clean this up.
+#include "mlir/Dialect/StandardOps/Ops.h"
 #include "mlir/Dialect/Utils/StructuredOpsUtils.h"
 #include "mlir/EDSC/Builders.h"
-#include "mlir/EDSC/Intrinsics.h"
-#include "mlir/IR/AffineExpr.h"
-#include "mlir/IR/Builders.h"
 
 namespace mlir {
+class AffineForOp;
 class BlockArgument;
+class SubViewOp;
+
+namespace loop {
+class ParallelOp;
+} // namespace loop
 
 namespace edsc {
+class AffineLoopNestBuilder;
+class ParallelLoopNestBuilder;
 
 /// A LoopRangeBuilder is a generic NestedBuilder for loop.for operations.
 /// More specifically it is meant to be used as a temporary object for
@@ -115,7 +122,6 @@ Operation *makeGenericLinalgOp(
 namespace ops {
 using edsc::StructuredIndexed;
 using edsc::ValueHandle;
-using edsc::intrinsics::linalg_yield;
 
 //===----------------------------------------------------------------------===//
 // EDSC builders for linalg generic operations.

diff  --git a/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h b/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
index 02b60b9bc050..98ff016182fa 100644
--- a/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
+++ b/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
@@ -8,14 +8,23 @@
 #ifndef MLIR_DIALECT_LINALG_EDSC_INTRINSICS_H_
 #define MLIR_DIALECT_LINALG_EDSC_INTRINSICS_H_
 
-#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
-#include "mlir/EDSC/Builders.h"
+#include "mlir/Dialect/Linalg/EDSC/Builders.h"
 #include "mlir/EDSC/Intrinsics.h"
+#include "mlir/Transforms/FoldUtils.h"
 
 namespace mlir {
 namespace edsc {
-namespace intrinsics {
 
+template <typename Op, typename... Args>
+ValueHandle ValueHandle::create(OperationFolder *folder, Args... args) {
+  return folder ? ValueHandle(folder->create<Op>(ScopedContext::getBuilder(),
+                                                 ScopedContext::getLocation(),
+                                                 args...))
+                : ValueHandle(ScopedContext::getBuilder().create<Op>(
+                      ScopedContext::getLocation(), args...));
+}
+
+namespace intrinsics {
 using linalg_copy = OperationBuilder<linalg::CopyOp>;
 using linalg_fill = OperationBuilder<linalg::FillOp>;
 using linalg_range = ValueBuilder<linalg::RangeOp>;

diff  --git a/mlir/include/mlir/Dialect/LoopOps/EDSC/Builders.h b/mlir/include/mlir/Dialect/LoopOps/EDSC/Builders.h
new file mode 100644
index 000000000000..41b0e1a972bf
--- /dev/null
+++ b/mlir/include/mlir/Dialect/LoopOps/EDSC/Builders.h
@@ -0,0 +1,68 @@
+//===- Builders.h - MLIR Declarative Builder Classes ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Provides intuitive composable interfaces for building structured MLIR
+// snippets in a declarative fashion.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_LOOPOPS_EDSC_BUILDERS_H_
+#define MLIR_DIALECT_LOOPOPS_EDSC_BUILDERS_H_
+
+#include "mlir/Dialect/LoopOps/LoopOps.h"
+#include "mlir/EDSC/Builders.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/Types.h"
+
+namespace mlir {
+namespace edsc {
+
+/// Constructs a new loop::ParallelOp and captures the associated induction
+/// variables. An array of ValueHandle pointers is passed as the first
+/// argument and is the *only* way to capture loop induction variables.
+LoopBuilder makeParallelLoopBuilder(ArrayRef<ValueHandle *> ivs,
+                                    ArrayRef<ValueHandle> lbHandles,
+                                    ArrayRef<ValueHandle> ubHandles,
+                                    ArrayRef<ValueHandle> steps);
+/// Constructs a new loop::ForOp and captures the associated induction
+/// variable. A ValueHandle pointer is passed as the first argument and is the
+/// *only* way to capture the loop induction variable.
+LoopBuilder makeLoopBuilder(ValueHandle *iv, ValueHandle lbHandle,
+                            ValueHandle ubHandle, ValueHandle stepHandle);
+
+/// Helper class to sugar building loop.parallel loop nests from lower/upper
+/// bounds and step sizes.
+class ParallelLoopNestBuilder {
+public:
+  ParallelLoopNestBuilder(ArrayRef<ValueHandle *> ivs,
+                          ArrayRef<ValueHandle> lbs, ArrayRef<ValueHandle> ubs,
+                          ArrayRef<ValueHandle> steps);
+
+  void operator()(function_ref<void(void)> fun = nullptr);
+
+private:
+  SmallVector<LoopBuilder, 4> loops;
+};
+
+/// Helper class to sugar building loop.for loop nests from ranges.
+/// This is similar to edsc::AffineLoopNestBuilder except it operates on
+/// loop.for.
+class LoopNestBuilder {
+public:
+  LoopNestBuilder(ArrayRef<edsc::ValueHandle *> ivs, ArrayRef<ValueHandle> lbs,
+                  ArrayRef<ValueHandle> ubs, ArrayRef<ValueHandle> steps);
+  void operator()(std::function<void(void)> fun = nullptr);
+
+private:
+  SmallVector<LoopBuilder, 4> loops;
+};
+
+} // namespace edsc
+} // namespace mlir
+
+#endif // MLIR_DIALECT_LOOPOPS_EDSC_BUILDERS_H_

diff  --git a/mlir/include/mlir/Dialect/StandardOps/EDSC/Builders.h b/mlir/include/mlir/Dialect/StandardOps/EDSC/Builders.h
new file mode 100644
index 000000000000..e852e8228dfb
--- /dev/null
+++ b/mlir/include/mlir/Dialect/StandardOps/EDSC/Builders.h
@@ -0,0 +1,81 @@
+//===- Builders.h - MLIR EDSC Builders for StandardOps ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef MLIR_DIALECT_STANDARDOPS_EDSC_BUILDERS_H_
+#define MLIR_DIALECT_STANDARDOPS_EDSC_BUILDERS_H_
+
+#include "mlir/Dialect/StandardOps/Ops.h"
+#include "mlir/EDSC/Builders.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/Types.h"
+
+namespace mlir {
+namespace edsc {
+
+/// Base class for MemRefBoundsCapture and VectorBoundsCapture.
+class BoundsCapture {
+public:
+  unsigned rank() const { return lbs.size(); }
+  ValueHandle lb(unsigned idx) { return lbs[idx]; }
+  ValueHandle ub(unsigned idx) { return ubs[idx]; }
+  int64_t step(unsigned idx) { return steps[idx]; }
+  std::tuple<ValueHandle, ValueHandle, int64_t> range(unsigned idx) {
+    return std::make_tuple(lbs[idx], ubs[idx], steps[idx]);
+  }
+  void swapRanges(unsigned i, unsigned j) {
+    if (i == j)
+      return;
+    lbs[i].swap(lbs[j]);
+    ubs[i].swap(ubs[j]);
+    std::swap(steps[i], steps[j]);
+  }
+
+  ArrayRef<ValueHandle> getLbs() { return lbs; }
+  ArrayRef<ValueHandle> getUbs() { return ubs; }
+  ArrayRef<int64_t> getSteps() { return steps; }
+
+protected:
+  SmallVector<ValueHandle, 8> lbs;
+  SmallVector<ValueHandle, 8> ubs;
+  SmallVector<int64_t, 8> steps;
+};
+
+/// A MemRefBoundsCapture represents the information required to step through a
+/// MemRef. It has placeholders for non-contiguous tensors that fit within the
+/// Fortran subarray model.
+/// At the moment it can only capture a MemRef with an identity layout map.
+// TODO(ntv): Support MemRefs with layoutMaps.
+class MemRefBoundsCapture : public BoundsCapture {
+public:
+  explicit MemRefBoundsCapture(Value v);
+  MemRefBoundsCapture(const MemRefBoundsCapture &) = default;
+  MemRefBoundsCapture &operator=(const MemRefBoundsCapture &) = default;
+
+  unsigned fastestVarying() const { return rank() - 1; }
+
+private:
+  ValueHandle base;
+};
+
+/// A VectorBoundsCapture represents the information required to step through a
+/// Vector accessing each scalar element at a time. It is the counterpart of
+/// a MemRefBoundsCapture but for vectors. This exists purely for boilerplate
+/// avoidance.
+class VectorBoundsCapture : public BoundsCapture {
+public:
+  explicit VectorBoundsCapture(Value v);
+  VectorBoundsCapture(const VectorBoundsCapture &) = default;
+  VectorBoundsCapture &operator=(const VectorBoundsCapture &) = default;
+
+private:
+  ValueHandle base;
+};
+
+} // namespace edsc
+} // namespace mlir
+
+#endif // MLIR_DIALECT_STANDARDOPS_EDSC_BUILDERS_H_

diff  --git a/mlir/include/mlir/Dialect/StandardOps/EDSC/Intrinsics.h b/mlir/include/mlir/Dialect/StandardOps/EDSC/Intrinsics.h
new file mode 100644
index 000000000000..1c73cb5aa628
--- /dev/null
+++ b/mlir/include/mlir/Dialect/StandardOps/EDSC/Intrinsics.h
@@ -0,0 +1,141 @@
+//===- Intrinsics.h - MLIR EDSC Intrinsics for StandardOps ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef MLIR_DIALECT_STANDARDOPS_EDSC_INTRINSICS_H_
+#define MLIR_DIALECT_STANDARDOPS_EDSC_INTRINSICS_H_
+
+#include "mlir/Dialect/StandardOps/EDSC/Builders.h"
+#include "mlir/EDSC/Intrinsics.h"
+
+namespace mlir {
+namespace edsc {
+namespace intrinsics {
+namespace folded {
+/// Helper variadic abstraction to allow extending to any MLIR op without
+/// boilerplate or Tablegen.
+/// Arguably a builder is not a ValueHandle but in practice it is only used as
+/// an alias to a notional ValueHandle<Op>.
+/// Implementing it as a subclass allows it to compose all the way to Value.
+/// Without subclassing, implicit conversion to Value would fail when composing
+/// in patterns such as: `select(a, b, select(c, d, e))`.
+template <typename Op>
+struct ValueBuilder : public ValueHandle {
+  /// Folder-based
+  template <typename... Args>
+  ValueBuilder(OperationFolder *folder, Args... args)
+      : ValueHandle(ValueHandle::create<Op>(folder, detail::unpack(args)...)) {}
+  ValueBuilder(OperationFolder *folder, ArrayRef<ValueHandle> vs)
+      : ValueHandle(ValueHandle::create<Op>(folder, detail::unpack(vs))) {}
+  template <typename... Args>
+  ValueBuilder(OperationFolder *folder, ArrayRef<ValueHandle> vs, Args... args)
+      : ValueHandle(ValueHandle::create<Op>(folder, detail::unpack(vs),
+                                            detail::unpack(args)...)) {}
+  template <typename T, typename... Args>
+  ValueBuilder(OperationFolder *folder, T t, ArrayRef<ValueHandle> vs,
+               Args... args)
+      : ValueHandle(ValueHandle::create<Op>(folder, detail::unpack(t),
+                                            detail::unpack(vs),
+                                            detail::unpack(args)...)) {}
+  template <typename T1, typename T2, typename... Args>
+  ValueBuilder(OperationFolder *folder, T1 t1, T2 t2, ArrayRef<ValueHandle> vs,
+               Args... args)
+      : ValueHandle(ValueHandle::create<Op>(
+            folder, detail::unpack(t1), detail::unpack(t2), detail::unpack(vs),
+            detail::unpack(args)...)) {}
+};
+} // namespace folded
+
+using std_addf = ValueBuilder<AddFOp>;
+using std_alloc = ValueBuilder<AllocOp>;
+using std_call = OperationBuilder<CallOp>;
+using std_constant_float = ValueBuilder<ConstantFloatOp>;
+using std_constant_index = ValueBuilder<ConstantIndexOp>;
+using std_constant_int = ValueBuilder<ConstantIntOp>;
+using std_dealloc = OperationBuilder<DeallocOp>;
+using std_dim = ValueBuilder<DimOp>;
+using std_muli = ValueBuilder<MulIOp>;
+using std_mulf = ValueBuilder<MulFOp>;
+using std_memref_cast = ValueBuilder<MemRefCastOp>;
+using std_ret = OperationBuilder<ReturnOp>;
+using std_select = ValueBuilder<SelectOp>;
+using std_load = ValueBuilder<LoadOp>;
+using std_store = OperationBuilder<StoreOp>;
+using std_subi = ValueBuilder<SubIOp>;
+using std_tanh = ValueBuilder<TanhOp>;
+using std_view = ValueBuilder<ViewOp>;
+using std_zero_extendi = ValueBuilder<ZeroExtendIOp>;
+using std_sign_extendi = ValueBuilder<SignExtendIOp>;
+
+/// Branches into the mlir::Block* captured by BlockHandle `b` with `operands`.
+///
+/// Prerequisites:
+///   All Handles have already captured previously constructed IR objects.
+OperationHandle br(BlockHandle bh, ArrayRef<ValueHandle> operands);
+
+/// Creates a new mlir::Block* and branches to it from the current block.
+/// Argument types are specified by `operands`.
+/// Captures the new block in `bh` and the actual `operands` in `captures`. To
+/// insert the new mlir::Block*, a local ScopedContext is constructed and
+/// released to the current block. The branch operation is then added to the
+/// new block.
+///
+/// Prerequisites:
+///   `b` has not yet captured an mlir::Block*.
+///   No `captures` have captured any mlir::Value.
+///   All `operands` have already captured an mlir::Value
+///   captures.size() == operands.size()
+///   captures and operands are pairwise of the same type.
+OperationHandle br(BlockHandle *bh, ArrayRef<ValueHandle *> captures,
+                   ArrayRef<ValueHandle> operands);
+
+/// Branches into the mlir::Block* captured by BlockHandle `trueBranch` with
+/// `trueOperands` if `cond` evaluates to `true` (resp. `falseBranch` and
+/// `falseOperand` if `cond` evaluates to `false`).
+///
+/// Prerequisites:
+///   All Handles have captured previously constructed IR objects.
+OperationHandle cond_br(ValueHandle cond, BlockHandle trueBranch,
+                        ArrayRef<ValueHandle> trueOperands,
+                        BlockHandle falseBranch,
+                        ArrayRef<ValueHandle> falseOperands);
+
+/// Eagerly creates new mlir::Block* with argument types specified by
+/// `trueOperands`/`falseOperands`.
+/// Captures the new blocks in `trueBranch`/`falseBranch` and the arguments in
+/// `trueCaptures/falseCaptures`.
+/// To insert the new mlir::Block*, a local ScopedContext is constructed and
+/// released. The branch operation is then added in the original location and
+/// targeting the eagerly constructed blocks.
+///
+/// Prerequisites:
+///   `trueBranch`/`falseBranch` has not yet captured an mlir::Block*.
+///   No `trueCaptures`/`falseCaptures` have captured any mlir::Value.
+///   All `trueOperands`/`falseOperands` have already captured an mlir::Value
+///   `trueCaptures`.size() == `trueOperands`.size()
+///   `falseCaptures`.size() == `falseOperands`.size()
+///   `trueCaptures` and `trueOperands` are pairwise of the same type
+///   `falseCaptures` and `falseOperands` are pairwise of the same type.
+OperationHandle cond_br(ValueHandle cond, BlockHandle *trueBranch,
+                        ArrayRef<ValueHandle *> trueCaptures,
+                        ArrayRef<ValueHandle> trueOperands,
+                        BlockHandle *falseBranch,
+                        ArrayRef<ValueHandle *> falseCaptures,
+                        ArrayRef<ValueHandle> falseOperands);
+
+/// Provide an index notation around std_load and std_store.
+using StdIndexedValue =
+    TemplatedIndexedValue<intrinsics::std_load, intrinsics::std_store>;
+
+using folded_std_constant_index = folded::ValueBuilder<ConstantIndexOp>;
+using folded_std_constant_float = folded::ValueBuilder<ConstantFloatOp>;
+using folded_std_dim = folded::ValueBuilder<DimOp>;
+using folded_std_muli = folded::ValueBuilder<MulIOp>;
+} // namespace intrinsics
+} // namespace edsc
+} // namespace mlir
+
+#endif // MLIR_DIALECT_STANDARDOPS_EDSC_INTRINSICS_H_

diff  --git a/mlir/include/mlir/EDSC/Builders.h b/mlir/include/mlir/EDSC/Builders.h
index ab40e333a2c5..dafa09bc6628 100644
--- a/mlir/include/mlir/EDSC/Builders.h
+++ b/mlir/include/mlir/EDSC/Builders.h
@@ -14,23 +14,15 @@
 #ifndef MLIR_EDSC_BUILDERS_H_
 #define MLIR_EDSC_BUILDERS_H_
 
-#include "mlir/Dialect/AffineOps/AffineOps.h"
-#include "mlir/Dialect/LoopOps/LoopOps.h"
-#include "mlir/Dialect/StandardOps/Ops.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/Builders.h"
-#include "mlir/Transforms/FoldUtils.h"
+#include "mlir/IR/StandardTypes.h"
+#include "mlir/IR/Types.h"
 
 namespace mlir {
+class OperationFolder;
 
 namespace edsc {
-
-struct index_type {
-  explicit index_type(int64_t v) : v(v) {}
-  explicit operator int64_t() { return v; }
-  int64_t v;
-};
-
 class BlockHandle;
 class CapturableHandle;
 class NestedBuilder;
@@ -150,24 +142,6 @@ class NestedBuilder {
 /// the name LoopBuilder (as opposed to say ForBuilder or AffineForBuilder).
 class LoopBuilder : public NestedBuilder {
 public:
-  /// Constructs a new AffineForOp and captures the associated induction
-  /// variable. A ValueHandle pointer is passed as the first argument and is the
-  /// *only* way to capture the loop induction variable.
-  static LoopBuilder makeAffine(ValueHandle *iv,
-                                ArrayRef<ValueHandle> lbHandles,
-                                ArrayRef<ValueHandle> ubHandles, int64_t step);
-  /// Constructs a new loop::ParallelOp and captures the associated induction
-  /// variables. An array of ValueHandle pointers is passed as the first
-  /// argument and is the *only* way to capture loop induction variables.
-  static LoopBuilder makeParallel(ArrayRef<ValueHandle *> ivs,
-                                  ArrayRef<ValueHandle> lbHandles,
-                                  ArrayRef<ValueHandle> ubHandles,
-                                  ArrayRef<ValueHandle> steps);
-  /// Constructs a new loop::ForOp and captures the associated induction
-  /// variable. A ValueHandle pointer is passed as the first argument and is the
-  /// *only* way to capture the loop induction variable.
-  static LoopBuilder makeLoop(ValueHandle *iv, ValueHandle lbHandle,
-                              ValueHandle ubHandle, ValueHandle stepHandle);
   LoopBuilder(const LoopBuilder &) = delete;
   LoopBuilder(LoopBuilder &&) = default;
 
@@ -181,72 +155,18 @@ class LoopBuilder : public NestedBuilder {
 
 private:
   LoopBuilder() = default;
-};
-
-/// Explicit nested LoopBuilder. Offers a compressed multi-loop builder to avoid
-/// explicitly writing all the loops in a nest. This simple functionality is
-/// also useful to write rank-agnostic custom ops.
-///
-/// Usage:
-///
-/// ```c++
-///    AffineLoopNestBuilder({&i, &j, &k}, {lb, lb, lb}, {ub, ub, ub}, {1, 1,
-///    1})(
-///      [&](){
-///        ...
-///      });
-/// ```
-///
-/// ```c++
-///    AffineLoopNestBuilder({&i}, {lb}, {ub}, {1})([&](){
-///      AffineLoopNestBuilder({&j}, {lb}, {ub}, {1})([&](){
-///        AffineLoopNestBuilder({&k}, {lb}, {ub}, {1})([&](){
-///          ...
-///        }),
-///      }),
-///    });
-/// ```
-class AffineLoopNestBuilder {
-public:
-  // This entry point accommodates the fact that AffineForOp implicitly uses
-  // multiple `lbs` and `ubs` with one single `iv` and `step` to encode `max`
-  // and and `min` constraints respectively.
-  AffineLoopNestBuilder(ValueHandle *iv, ArrayRef<ValueHandle> lbs,
-                        ArrayRef<ValueHandle> ubs, int64_t step);
-  AffineLoopNestBuilder(ArrayRef<ValueHandle *> ivs, ArrayRef<ValueHandle> lbs,
-                        ArrayRef<ValueHandle> ubs, ArrayRef<int64_t> steps);
-
-  void operator()(function_ref<void(void)> fun = nullptr);
-
-private:
-  SmallVector<LoopBuilder, 4> loops;
-};
 
-/// Helper class to sugar building loop.parallel loop nests from lower/upper
-/// bounds and step sizes.
-class ParallelLoopNestBuilder {
-public:
-  ParallelLoopNestBuilder(ArrayRef<ValueHandle *> ivs,
-                          ArrayRef<ValueHandle> lbs, ArrayRef<ValueHandle> ubs,
-                          ArrayRef<ValueHandle> steps);
-
-  void operator()(function_ref<void(void)> fun = nullptr);
-
-private:
-  SmallVector<LoopBuilder, 4> loops;
-};
-
-/// Helper class to sugar building loop.for loop nests from ranges.
-/// This is similar to edsc::AffineLoopNestBuilder except it operates on
-/// loop.for.
-class LoopNestBuilder {
-public:
-  LoopNestBuilder(ArrayRef<edsc::ValueHandle *> ivs, ArrayRef<ValueHandle> lbs,
-                  ArrayRef<ValueHandle> ubs, ArrayRef<ValueHandle> steps);
-  void operator()(std::function<void(void)> fun = nullptr);
-
-private:
-  SmallVector<LoopBuilder, 4> loops;
+  friend LoopBuilder makeAffineLoopBuilder(ValueHandle *iv,
+                                           ArrayRef<ValueHandle> lbHandles,
+                                           ArrayRef<ValueHandle> ubHandles,
+                                           int64_t step);
+  friend LoopBuilder makeParallelLoopBuilder(ArrayRef<ValueHandle *> ivs,
+                                             ArrayRef<ValueHandle> lbHandles,
+                                             ArrayRef<ValueHandle> ubHandles,
+                                             ArrayRef<ValueHandle> steps);
+  friend LoopBuilder makeLoopBuilder(ValueHandle *iv, ValueHandle lbHandle,
+                                     ValueHandle ubHandle,
+                                     ValueHandle stepHandle);
 };
 
 // This class exists solely to handle the C++ vexing parse case when
@@ -337,13 +257,6 @@ class ValueHandle : public CapturableHandle {
   /// been constructed in the past and that is captured "now" in the program.
   explicit ValueHandle(Value v) : t(v.getType()), v(v) {}
 
-  /// Builds a ConstantIndexOp of value `cst`. The constant is created at the
-  /// current insertion point.
-  /// This implicit constructor is provided to each build an eager Value for a
-  /// constant at the current insertion point in the IR. An implicit constructor
-  /// allows idiomatic expressions mixing ValueHandle and literals.
-  ValueHandle(index_type cst);
-
   /// ValueHandle is a value type, use the default copy constructor.
   ValueHandle(const ValueHandle &other) = default;
 
@@ -377,11 +290,6 @@ class ValueHandle : public CapturableHandle {
   template <typename Op, typename... Args>
   static ValueHandle create(OperationFolder *folder, Args... args);
 
-  /// Special case to build composed AffineApply operations.
-  // TODO: createOrFold when available and move inside of the `create` method.
-  static ValueHandle createComposedAffineApply(AffineMap map,
-                                               ArrayRef<Value> operands);
-
   /// Generic create for a named operation producing a single value.
   static ValueHandle create(StringRef name, ArrayRef<ValueHandle> operands,
                             ArrayRef<Type> resultTypes,
@@ -401,6 +309,12 @@ class ValueHandle : public CapturableHandle {
     return v.getDefiningOp();
   }
 
+  // Return a vector of fresh ValueHandles that have not captured.
+  static SmallVector<ValueHandle, 8> makeIndexHandles(unsigned count) {
+    auto indexType = IndexType::get(ScopedContext::getContext());
+    return SmallVector<ValueHandle, 8>(count, ValueHandle(indexType));
+  }
+
 protected:
   ValueHandle() : t(), v(nullptr) {}
 
@@ -555,48 +469,11 @@ ValueHandle ValueHandle::create(Args... args) {
   Operation *op = ScopedContext::getBuilder()
                       .create<Op>(ScopedContext::getLocation(), args...)
                       .getOperation();
-  if (op->getNumResults() == 1) {
+  if (op->getNumResults() == 1)
     return ValueHandle(op->getResult(0));
-  } else if (op->getNumResults() == 0) {
-    if (auto f = dyn_cast<AffineForOp>(op)) {
-      return ValueHandle(f.getInductionVar());
-    }
-  }
   llvm_unreachable("unsupported operation, use an OperationHandle instead");
 }
 
-template <typename Op, typename... Args>
-ValueHandle ValueHandle::create(OperationFolder *folder, Args... args) {
-  return folder ? ValueHandle(folder->create<Op>(ScopedContext::getBuilder(),
-                                                 ScopedContext::getLocation(),
-                                                 args...))
-                : ValueHandle(ScopedContext::getBuilder().create<Op>(
-                      ScopedContext::getLocation(), args...));
-}
-
-namespace op {
-
-ValueHandle operator+(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator-(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator*(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator/(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator%(ValueHandle lhs, ValueHandle rhs);
-ValueHandle floorDiv(ValueHandle lhs, ValueHandle rhs);
-ValueHandle ceilDiv(ValueHandle lhs, ValueHandle rhs);
-
-ValueHandle operator!(ValueHandle value);
-ValueHandle operator&&(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator||(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator^(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator==(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator!=(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator<(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator<=(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator>(ValueHandle lhs, ValueHandle rhs);
-ValueHandle operator>=(ValueHandle lhs, ValueHandle rhs);
-
-} // namespace op
-
 /// Entry point to build multiple ValueHandle from a `Container` of Value or
 /// Type.
 template <typename Container>
@@ -608,6 +485,105 @@ inline SmallVector<ValueHandle, 8> makeValueHandles(Container values) {
   return res;
 }
 
+/// A TemplatedIndexedValue brings an index notation over the template Load and
+/// Store parameters. Assigning to an IndexedValue emits an actual `Store`
+/// operation, while converting an IndexedValue to a ValueHandle emits an actual
+/// `Load` operation.
+template <typename Load, typename Store> class TemplatedIndexedValue {
+public:
+  explicit TemplatedIndexedValue(Type t) : base(t) {}
+  explicit TemplatedIndexedValue(Value v)
+      : TemplatedIndexedValue(ValueHandle(v)) {}
+  explicit TemplatedIndexedValue(ValueHandle v) : base(v) {}
+
+  TemplatedIndexedValue(const TemplatedIndexedValue &rhs) = default;
+
+  TemplatedIndexedValue operator()() { return *this; }
+  /// Returns a new `TemplatedIndexedValue`.
+  TemplatedIndexedValue operator()(ValueHandle index) {
+    TemplatedIndexedValue res(base);
+    res.indices.push_back(index);
+    return res;
+  }
+  template <typename... Args>
+  TemplatedIndexedValue operator()(ValueHandle index, Args... indices) {
+    return TemplatedIndexedValue(base, index).append(indices...);
+  }
+  TemplatedIndexedValue operator()(ArrayRef<ValueHandle> indices) {
+    return TemplatedIndexedValue(base, indices);
+  }
+
+  /// Emits a `store`.
+  OperationHandle operator=(const TemplatedIndexedValue &rhs) {
+    ValueHandle rrhs(rhs);
+    return Store(rrhs, getBase(), {indices.begin(), indices.end()});
+  }
+  OperationHandle operator=(ValueHandle rhs) {
+    return Store(rhs, getBase(), {indices.begin(), indices.end()});
+  }
+
+  /// Emits a `load` when converting to a ValueHandle.
+  operator ValueHandle() const {
+    return Load(getBase(), {indices.begin(), indices.end()});
+  }
+
+  /// Emits a `load` when converting to a Value.
+  Value operator*(void) const {
+    return Load(getBase(), {indices.begin(), indices.end()}).getValue();
+  }
+
+  ValueHandle getBase() const { return base; }
+
+  /// Operator overloadings.
+  ValueHandle operator+(ValueHandle e);
+  ValueHandle operator-(ValueHandle e);
+  ValueHandle operator*(ValueHandle e);
+  ValueHandle operator/(ValueHandle e);
+  OperationHandle operator+=(ValueHandle e);
+  OperationHandle operator-=(ValueHandle e);
+  OperationHandle operator*=(ValueHandle e);
+  OperationHandle operator/=(ValueHandle e);
+  ValueHandle operator+(TemplatedIndexedValue e) {
+    return *this + static_cast<ValueHandle>(e);
+  }
+  ValueHandle operator-(TemplatedIndexedValue e) {
+    return *this - static_cast<ValueHandle>(e);
+  }
+  ValueHandle operator*(TemplatedIndexedValue e) {
+    return *this * static_cast<ValueHandle>(e);
+  }
+  ValueHandle operator/(TemplatedIndexedValue e) {
+    return *this / static_cast<ValueHandle>(e);
+  }
+  OperationHandle operator+=(TemplatedIndexedValue e) {
+    return this->operator+=(static_cast<ValueHandle>(e));
+  }
+  OperationHandle operator-=(TemplatedIndexedValue e) {
+    return this->operator-=(static_cast<ValueHandle>(e));
+  }
+  OperationHandle operator*=(TemplatedIndexedValue e) {
+    return this->operator*=(static_cast<ValueHandle>(e));
+  }
+  OperationHandle operator/=(TemplatedIndexedValue e) {
+    return this->operator/=(static_cast<ValueHandle>(e));
+  }
+
+private:
+  TemplatedIndexedValue(ValueHandle base, ArrayRef<ValueHandle> indices)
+      : base(base), indices(indices.begin(), indices.end()) {}
+
+  TemplatedIndexedValue &append() { return *this; }
+
+  template <typename T, typename... Args>
+  TemplatedIndexedValue &append(T index, Args... indices) {
+    this->indices.push_back(static_cast<ValueHandle>(index));
+    append(indices...);
+    return *this;
+  }
+  ValueHandle base;
+  SmallVector<ValueHandle, 8> indices;
+};
+
 } // namespace edsc
 } // namespace mlir
 

diff --git a/mlir/include/mlir/EDSC/Helpers.h b/mlir/include/mlir/EDSC/Helpers.h
deleted file mode 100644
index 1750b24f8713..000000000000
--- a/mlir/include/mlir/EDSC/Helpers.h
+++ /dev/null
@@ -1,258 +0,0 @@
-//===- Helpers.h - MLIR Declarative Helper Functionality --------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Provides helper classes and syntactic sugar for declarative builders.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MLIR_EDSC_HELPERS_H_
-#define MLIR_EDSC_HELPERS_H_
-
-#include "mlir/EDSC/Builders.h"
-#include "mlir/EDSC/Intrinsics.h"
-
-namespace mlir {
-namespace edsc {
-
-// A TemplatedIndexedValue brings an index notation over the template Load and
-// Store parameters.
-template <typename Load, typename Store> class TemplatedIndexedValue;
-
-// By default, edsc::IndexedValue provides an index notation around the affine
-// load and stores. edsc::StdIndexedValue provides the standard load/store
-// counterpart.
-using IndexedValue =
-    TemplatedIndexedValue<intrinsics::affine_load, intrinsics::affine_store>;
-using StdIndexedValue =
-    TemplatedIndexedValue<intrinsics::std_load, intrinsics::std_store>;
-
-// Base class for MemRefView and VectorView.
-class View {
-public:
-  unsigned rank() const { return lbs.size(); }
-  ValueHandle lb(unsigned idx) { return lbs[idx]; }
-  ValueHandle ub(unsigned idx) { return ubs[idx]; }
-  int64_t step(unsigned idx) { return steps[idx]; }
-  std::tuple<ValueHandle, ValueHandle, int64_t> range(unsigned idx) {
-    return std::make_tuple(lbs[idx], ubs[idx], steps[idx]);
-  }
-  void swapRanges(unsigned i, unsigned j) {
-    if (i == j)
-      return;
-    lbs[i].swap(lbs[j]);
-    ubs[i].swap(ubs[j]);
-    std::swap(steps[i], steps[j]);
-  }
-
-  ArrayRef<ValueHandle> getLbs() { return lbs; }
-  ArrayRef<ValueHandle> getUbs() { return ubs; }
-  ArrayRef<int64_t> getSteps() { return steps; }
-
-protected:
-  SmallVector<ValueHandle, 8> lbs;
-  SmallVector<ValueHandle, 8> ubs;
-  SmallVector<int64_t, 8> steps;
-};
-
-/// A MemRefView represents the information required to step through a
-/// MemRef. It has placeholders for non-contiguous tensors that fit within the
-/// Fortran subarray model.
-/// At the moment it can only capture a MemRef with an identity layout map.
-// TODO(ntv): Support MemRefs with layoutMaps.
-class MemRefView : public View {
-public:
-  explicit MemRefView(Value v);
-  MemRefView(const MemRefView &) = default;
-  MemRefView &operator=(const MemRefView &) = default;
-
-  unsigned fastestVarying() const { return rank() - 1; }
-
-private:
-  friend IndexedValue;
-  ValueHandle base;
-};
-
-/// A VectorView represents the information required to step through a
-/// Vector accessing each scalar element at a time. It is the counterpart of
-/// a MemRefView but for vectors. This exists purely for boilerplate avoidance.
-class VectorView : public View {
-public:
-  explicit VectorView(Value v);
-  VectorView(const VectorView &) = default;
-  VectorView &operator=(const VectorView &) = default;
-
-private:
-  friend IndexedValue;
-  ValueHandle base;
-};
-
-/// A TemplatedIndexedValue brings an index notation over the template Load and
-/// Store parameters. This helper class is an abstraction purely for sugaring
-/// purposes and allows writing compact expressions such as:
-///
-/// ```mlir
-///    // `IndexedValue` provided by default in the mlir::edsc namespace.
-///    using IndexedValue =
-///      TemplatedIndexedValue<intrinsics::load, intrinsics::store>;
-///    IndexedValue A(...), B(...), C(...);
-///    For(ivs, zeros, shapeA, ones, {
-///      C(ivs) = A(ivs) + B(ivs)
-///    });
-/// ```
-///
-/// Assigning to an IndexedValue emits an actual `Store` operation, while
-/// converting an IndexedValue to a ValueHandle emits an actual `Load`
-/// operation.
-template <typename Load, typename Store> class TemplatedIndexedValue {
-public:
-  explicit TemplatedIndexedValue(Type t) : base(t) {}
-  explicit TemplatedIndexedValue(Value v)
-      : TemplatedIndexedValue(ValueHandle(v)) {}
-  explicit TemplatedIndexedValue(ValueHandle v) : base(v) {}
-
-  TemplatedIndexedValue(const TemplatedIndexedValue &rhs) = default;
-
-  TemplatedIndexedValue operator()() { return *this; }
-  /// Returns a new `TemplatedIndexedValue`.
-  TemplatedIndexedValue operator()(ValueHandle index) {
-    TemplatedIndexedValue res(base);
-    res.indices.push_back(index);
-    return res;
-  }
-  template <typename... Args>
-  TemplatedIndexedValue operator()(ValueHandle index, Args... indices) {
-    return TemplatedIndexedValue(base, index).append(indices...);
-  }
-  TemplatedIndexedValue operator()(ArrayRef<ValueHandle> indices) {
-    return TemplatedIndexedValue(base, indices);
-  }
-  TemplatedIndexedValue operator()(ArrayRef<IndexHandle> indices) {
-    return TemplatedIndexedValue(
-        base, ArrayRef<ValueHandle>(indices.begin(), indices.end()));
-  }
-
-  /// Emits a `store`.
-  // NOLINTNEXTLINE: unconventional-assign-operator
-  OperationHandle operator=(const TemplatedIndexedValue &rhs) {
-    ValueHandle rrhs(rhs);
-    return Store(rrhs, getBase(), {indices.begin(), indices.end()});
-  }
-  // NOLINTNEXTLINE: unconventional-assign-operator
-  OperationHandle operator=(ValueHandle rhs) {
-    return Store(rhs, getBase(), {indices.begin(), indices.end()});
-  }
-
-  /// Emits a `load` when converting to a ValueHandle.
-  operator ValueHandle() const {
-    return Load(getBase(), {indices.begin(), indices.end()});
-  }
-
-  /// Emits a `load` when converting to a Value.
-  Value operator*(void) const {
-    return Load(getBase(), {indices.begin(), indices.end()}).getValue();
-  }
-
-  ValueHandle getBase() const { return base; }
-
-  /// Operator overloadings.
-  ValueHandle operator+(ValueHandle e);
-  ValueHandle operator-(ValueHandle e);
-  ValueHandle operator*(ValueHandle e);
-  ValueHandle operator/(ValueHandle e);
-  OperationHandle operator+=(ValueHandle e);
-  OperationHandle operator-=(ValueHandle e);
-  OperationHandle operator*=(ValueHandle e);
-  OperationHandle operator/=(ValueHandle e);
-  ValueHandle operator+(TemplatedIndexedValue e) {
-    return *this + static_cast<ValueHandle>(e);
-  }
-  ValueHandle operator-(TemplatedIndexedValue e) {
-    return *this - static_cast<ValueHandle>(e);
-  }
-  ValueHandle operator*(TemplatedIndexedValue e) {
-    return *this * static_cast<ValueHandle>(e);
-  }
-  ValueHandle operator/(TemplatedIndexedValue e) {
-    return *this / static_cast<ValueHandle>(e);
-  }
-  OperationHandle operator+=(TemplatedIndexedValue e) {
-    return this->operator+=(static_cast<ValueHandle>(e));
-  }
-  OperationHandle operator-=(TemplatedIndexedValue e) {
-    return this->operator-=(static_cast<ValueHandle>(e));
-  }
-  OperationHandle operator*=(TemplatedIndexedValue e) {
-    return this->operator*=(static_cast<ValueHandle>(e));
-  }
-  OperationHandle operator/=(TemplatedIndexedValue e) {
-    return this->operator/=(static_cast<ValueHandle>(e));
-  }
-
-private:
-  TemplatedIndexedValue(ValueHandle base, ArrayRef<ValueHandle> indices)
-      : base(base), indices(indices.begin(), indices.end()) {}
-
-  TemplatedIndexedValue &append() { return *this; }
-
-  template <typename T, typename... Args>
-  TemplatedIndexedValue &append(T index, Args... indices) {
-    this->indices.push_back(static_cast<ValueHandle>(index));
-    append(indices...);
-    return *this;
-  }
-  ValueHandle base;
-  SmallVector<ValueHandle, 8> indices;
-};
-
-/// Operator overloadings.
-template <typename Load, typename Store>
-ValueHandle TemplatedIndexedValue<Load, Store>::operator+(ValueHandle e) {
-  using op::operator+;
-  return static_cast<ValueHandle>(*this) + e;
-}
-template <typename Load, typename Store>
-ValueHandle TemplatedIndexedValue<Load, Store>::operator-(ValueHandle e) {
-  using op::operator-;
-  return static_cast<ValueHandle>(*this) - e;
-}
-template <typename Load, typename Store>
-ValueHandle TemplatedIndexedValue<Load, Store>::operator*(ValueHandle e) {
-  using op::operator*;
-  return static_cast<ValueHandle>(*this) * e;
-}
-template <typename Load, typename Store>
-ValueHandle TemplatedIndexedValue<Load, Store>::operator/(ValueHandle e) {
-  using op::operator/;
-  return static_cast<ValueHandle>(*this) / e;
-}
-
-template <typename Load, typename Store>
-OperationHandle TemplatedIndexedValue<Load, Store>::operator+=(ValueHandle e) {
-  using op::operator+;
-  return Store(*this + e, getBase(), {indices.begin(), indices.end()});
-}
-template <typename Load, typename Store>
-OperationHandle TemplatedIndexedValue<Load, Store>::operator-=(ValueHandle e) {
-  using op::operator-;
-  return Store(*this - e, getBase(), {indices.begin(), indices.end()});
-}
-template <typename Load, typename Store>
-OperationHandle TemplatedIndexedValue<Load, Store>::operator*=(ValueHandle e) {
-  using op::operator*;
-  return Store(*this * e, getBase(), {indices.begin(), indices.end()});
-}
-template <typename Load, typename Store>
-OperationHandle TemplatedIndexedValue<Load, Store>::operator/=(ValueHandle e) {
-  using op::operator/;
-  return Store(*this / e, getBase(), {indices.begin(), indices.end()});
-}
-
-} // namespace edsc
-} // namespace mlir
-
-#endif // MLIR_EDSC_HELPERS_H_

diff --git a/mlir/include/mlir/EDSC/Intrinsics.h b/mlir/include/mlir/EDSC/Intrinsics.h
index d800f3661bf7..3a8fc6d53468 100644
--- a/mlir/include/mlir/EDSC/Intrinsics.h
+++ b/mlir/include/mlir/EDSC/Intrinsics.h
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 //
 // Provides intuitive composable intrinsics for building snippets of MLIR
-// declaratively
+// declaratively.
 //
 //===----------------------------------------------------------------------===//
 
@@ -15,6 +15,7 @@
 #define MLIR_EDSC_INTRINSICS_H_
 
 #include "mlir/EDSC/Builders.h"
+#include "mlir/IR/StandardTypes.h"
 #include "mlir/Support/LLVM.h"
 
 namespace mlir {
@@ -24,62 +25,16 @@ class Type;
 
 namespace edsc {
 
-/// An IndexHandle is a simple wrapper around a ValueHandle.
-/// IndexHandles are ubiquitous enough to justify a new type to allow simple
-/// declarations without boilerplate such as:
-///
-/// ```c++
-///    IndexHandle i, j, k;
-/// ```
-struct IndexHandle : public ValueHandle {
-  explicit IndexHandle()
-      : ValueHandle(ScopedContext::getBuilder().getIndexType()) {}
-  explicit IndexHandle(index_type v) : ValueHandle(v) {}
-  explicit IndexHandle(Value v) : ValueHandle(v) {
-    assert(v.getType() == ScopedContext::getBuilder().getIndexType() &&
-           "Expected index type");
-  }
-  explicit IndexHandle(ValueHandle v) : ValueHandle(v) {
-    assert(v.getType() == ScopedContext::getBuilder().getIndexType() &&
-           "Expected index type");
-  }
-  IndexHandle &operator=(const ValueHandle &v) {
-    assert(v.getType() == ScopedContext::getBuilder().getIndexType() &&
-           "Expected index type");
-    /// Creating a new IndexHandle(v) and then std::swap rightly complains the
-    /// binding has already occurred and that we should use another name.
-    this->t = v.getType();
-    this->v = v.getValue();
-    return *this;
-  }
-};
-
-inline SmallVector<IndexHandle, 8> makeIndexHandles(unsigned rank) {
-  return SmallVector<IndexHandle, 8>(rank);
-}
-
-/// Entry point to build multiple ValueHandle* from a mutable list `ivs` of T.
-template <typename T>
+/// Entry point to build multiple ValueHandle* from a mutable list `ivs`.
 inline SmallVector<ValueHandle *, 8>
-makeHandlePointers(MutableArrayRef<T> ivs) {
+makeHandlePointers(MutableArrayRef<ValueHandle> ivs) {
   SmallVector<ValueHandle *, 8> pivs;
   pivs.reserve(ivs.size());
-  for (auto &iv : ivs) {
+  for (auto &iv : ivs)
     pivs.push_back(&iv);
-  }
   return pivs;
 }
 
-/// Returns a vector of the underlying Value from `ivs`.
-inline SmallVector<Value, 8> extractValues(ArrayRef<IndexHandle> ivs) {
-  SmallVector<Value, 8> vals;
-  vals.reserve(ivs.size());
-  for (auto &iv : ivs) {
-    vals.push_back(iv.getValue());
-  }
-  return vals;
-}
-
 /// Provides a set of first class intrinsics.
 /// In the future, most of intrinsics related to Operation that don't contain
 /// other operations should be Tablegen'd.
@@ -93,13 +48,6 @@ class ValueHandleArray {
   ValueHandleArray(ArrayRef<ValueHandle> vals) {
     values.append(vals.begin(), vals.end());
   }
-  ValueHandleArray(ArrayRef<IndexHandle> vals) {
-    values.append(vals.begin(), vals.end());
-  }
-  ValueHandleArray(ArrayRef<index_type> vals) {
-    SmallVector<IndexHandle, 8> tmp(vals.begin(), vals.end());
-    values.append(tmp.begin(), tmp.end());
-  }
   operator ArrayRef<Value>() { return values; }
 
 private:
@@ -143,29 +91,6 @@ template <typename Op> struct ValueBuilder : public ValueHandle {
             detail::unpack(t1), detail::unpack(t2), detail::unpack(vs),
             detail::unpack(args)...)) {}
 
-  /// Folder-based
-  template <typename... Args>
-  ValueBuilder(OperationFolder *folder, Args... args)
-      : ValueHandle(ValueHandle::create<Op>(folder, detail::unpack(args)...)) {}
-  ValueBuilder(OperationFolder *folder, ArrayRef<ValueHandle> vs)
-      : ValueBuilder(ValueBuilder::create<Op>(folder, detail::unpack(vs))) {}
-  template <typename... Args>
-  ValueBuilder(OperationFolder *folder, ArrayRef<ValueHandle> vs, Args... args)
-      : ValueHandle(ValueHandle::create<Op>(folder, detail::unpack(vs),
-                                            detail::unpack(args)...)) {}
-  template <typename T, typename... Args>
-  ValueBuilder(OperationFolder *folder, T t, ArrayRef<ValueHandle> vs,
-               Args... args)
-      : ValueHandle(ValueHandle::create<Op>(folder, detail::unpack(t),
-                                            detail::unpack(vs),
-                                            detail::unpack(args)...)) {}
-  template <typename T1, typename T2, typename... Args>
-  ValueBuilder(OperationFolder *folder, T1 t1, T2 t2, ArrayRef<ValueHandle> vs,
-               Args... args)
-      : ValueHandle(ValueHandle::create<Op>(
-            folder, detail::unpack(t1), detail::unpack(t2), detail::unpack(vs),
-            detail::unpack(args)...)) {}
-
   ValueBuilder() : ValueHandle(ValueHandle::create<Op>()) {}
 };
 
@@ -191,88 +116,6 @@ template <typename Op> struct OperationBuilder : public OperationHandle {
   OperationBuilder() : OperationHandle(OperationHandle::create<Op>()) {}
 };
 
-using addf = ValueBuilder<AddFOp>;
-using affine_apply = ValueBuilder<AffineApplyOp>;
-using affine_if = OperationBuilder<AffineIfOp>;
-using affine_load = ValueBuilder<AffineLoadOp>;
-using affine_min = ValueBuilder<AffineMinOp>;
-using affine_max = ValueBuilder<AffineMaxOp>;
-using affine_store = OperationBuilder<AffineStoreOp>;
-using alloc = ValueBuilder<AllocOp>;
-using call = OperationBuilder<mlir::CallOp>;
-using constant_float = ValueBuilder<ConstantFloatOp>;
-using constant_index = ValueBuilder<ConstantIndexOp>;
-using constant_int = ValueBuilder<ConstantIntOp>;
-using dealloc = OperationBuilder<DeallocOp>;
-using dim = ValueBuilder<DimOp>;
-using muli = ValueBuilder<MulIOp>;
-using mulf = ValueBuilder<MulFOp>;
-using memref_cast = ValueBuilder<MemRefCastOp>;
-using ret = OperationBuilder<ReturnOp>;
-using select = ValueBuilder<SelectOp>;
-using std_load = ValueBuilder<LoadOp>;
-using std_store = OperationBuilder<StoreOp>;
-using subi = ValueBuilder<SubIOp>;
-using tanh = ValueBuilder<TanhOp>;
-using view = ValueBuilder<ViewOp>;
-using zero_extendi = ValueBuilder<ZeroExtendIOp>;
-using sign_extendi = ValueBuilder<SignExtendIOp>;
-
-/// Branches into the mlir::Block* captured by BlockHandle `b` with `operands`.
-///
-/// Prerequisites:
-///   All Handles have already captured previously constructed IR objects.
-OperationHandle br(BlockHandle bh, ArrayRef<ValueHandle> operands);
-
-/// Creates a new mlir::Block* and branches to it from the current block.
-/// Argument types are specified by `operands`.
-/// Captures the new block in `bh` and the actual `operands` in `captures`. To
-/// insert the new mlir::Block*, a local ScopedContext is constructed and
-/// released to the current block. The branch operation is then added to the
-/// new block.
-///
-/// Prerequisites:
-///   `b` has not yet captured an mlir::Block*.
-///   No `captures` have captured any mlir::Value.
-///   All `operands` have already captured an mlir::Value
-///   captures.size() == operands.size()
-///   captures and operands are pairwise of the same type.
-OperationHandle br(BlockHandle *bh, ArrayRef<ValueHandle *> captures,
-                   ArrayRef<ValueHandle> operands);
-
-/// Branches into the mlir::Block* captured by BlockHandle `trueBranch` with
-/// `trueOperands` if `cond` evaluates to `true` (resp. `falseBranch` and
-/// `falseOperand` if `cond` evaluates to `false`).
-///
-/// Prerequisites:
-///   All Handles have captured previously constructed IR objects.
-OperationHandle cond_br(ValueHandle cond, BlockHandle trueBranch,
-                        ArrayRef<ValueHandle> trueOperands,
-                        BlockHandle falseBranch,
-                        ArrayRef<ValueHandle> falseOperands);
-
-/// Eagerly creates new mlir::Block* with argument types specified by
-/// `trueOperands`/`falseOperands`.
-/// Captures the new blocks in `trueBranch`/`falseBranch` and the arguments in
-/// `trueCaptures/falseCaptures`.
-/// To insert the new mlir::Block*, a local ScopedContext is constructed and
-/// released. The branch operation is then added in the original location and
-/// targeting the eagerly constructed blocks.
-///
-/// Prerequisites:
-///   `trueBranch`/`falseBranch` has not yet captured an mlir::Block*.
-///   No `trueCaptures`/`falseCaptures` have captured any mlir::Value.
-///   All `trueOperands`/`trueOperands` have already captured an mlir::Value
-///   `trueCaptures`.size() == `trueOperands`.size()
-///   `falseCaptures`.size() == `falseOperands`.size()
-///   `trueCaptures` and `trueOperands` are pairwise of the same type
-///   `falseCaptures` and `falseOperands` are pairwise of the same type.
-OperationHandle cond_br(ValueHandle cond, BlockHandle *trueBranch,
-                        ArrayRef<ValueHandle *> trueCaptures,
-                        ArrayRef<ValueHandle> trueOperands,
-                        BlockHandle *falseBranch,
-                        ArrayRef<ValueHandle *> falseCaptures,
-                        ArrayRef<ValueHandle> falseOperands);
 } // namespace intrinsics
 } // namespace edsc
 } // namespace mlir

diff --git a/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp b/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
index 90312222e735..09f7e5f71d5e 100644
--- a/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
+++ b/mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
@@ -16,8 +16,7 @@
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
 #include "mlir/Dialect/Linalg/Passes.h"
-#include "mlir/EDSC/Builders.h"
-#include "mlir/EDSC/Intrinsics.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineMap.h"
 #include "mlir/IR/Attributes.h"
@@ -47,24 +46,22 @@ using namespace mlir::edsc::intrinsics;
 using namespace mlir::LLVM;
 using namespace mlir::linalg;
 
-using add = ValueBuilder<mlir::LLVM::AddOp>;
-using addi = ValueBuilder<mlir::AddIOp>;
-using bitcast = ValueBuilder<mlir::LLVM::BitcastOp>;
-using cmpi = ValueBuilder<mlir::CmpIOp>;
-using constant = ValueBuilder<mlir::LLVM::ConstantOp>;
-using extractvalue = ValueBuilder<mlir::LLVM::ExtractValueOp>;
-using gep = ValueBuilder<mlir::LLVM::GEPOp>;
-using insertvalue = ValueBuilder<mlir::LLVM::InsertValueOp>;
-using llvm_call = OperationBuilder<mlir::LLVM::CallOp>;
+using llvm_add = ValueBuilder<LLVM::AddOp>;
+using llvm_bitcast = ValueBuilder<LLVM::BitcastOp>;
+using llvm_constant = ValueBuilder<LLVM::ConstantOp>;
+using llvm_extractvalue = ValueBuilder<LLVM::ExtractValueOp>;
+using llvm_gep = ValueBuilder<LLVM::GEPOp>;
+using llvm_insertvalue = ValueBuilder<LLVM::InsertValueOp>;
+using llvm_call = OperationBuilder<LLVM::CallOp>;
 using llvm_icmp = ValueBuilder<LLVM::ICmpOp>;
 using llvm_load = ValueBuilder<LLVM::LoadOp>;
 using llvm_store = OperationBuilder<LLVM::StoreOp>;
 using llvm_select = ValueBuilder<LLVM::SelectOp>;
-using mul = ValueBuilder<mlir::LLVM::MulOp>;
-using ptrtoint = ValueBuilder<mlir::LLVM::PtrToIntOp>;
-using sub = ValueBuilder<mlir::LLVM::SubOp>;
-using llvm_undef = ValueBuilder<mlir::LLVM::UndefOp>;
-using urem = ValueBuilder<mlir::LLVM::URemOp>;
+using llvm_mul = ValueBuilder<LLVM::MulOp>;
+using llvm_ptrtoint = ValueBuilder<LLVM::PtrToIntOp>;
+using llvm_sub = ValueBuilder<LLVM::SubOp>;
+using llvm_undef = ValueBuilder<LLVM::UndefOp>;
+using llvm_urem = ValueBuilder<LLVM::URemOp>;
 using llvm_alloca = ValueBuilder<LLVM::AllocaOp>;
 using llvm_return = OperationBuilder<LLVM::ReturnOp>;
 
@@ -156,9 +153,9 @@ class RangeOpConversion : public LLVMOpLowering {
     // Fill in an aggregate value of the descriptor.
     RangeOpOperandAdaptor adaptor(operands);
     Value desc = llvm_undef(rangeDescriptorTy);
-    desc = insertvalue(desc, adaptor.min(), rewriter.getI64ArrayAttr(0));
-    desc = insertvalue(desc, adaptor.max(), rewriter.getI64ArrayAttr(1));
-    desc = insertvalue(desc, adaptor.step(), rewriter.getI64ArrayAttr(2));
+    desc = llvm_insertvalue(desc, adaptor.min(), rewriter.getI64ArrayAttr(0));
+    desc = llvm_insertvalue(desc, adaptor.max(), rewriter.getI64ArrayAttr(1));
+    desc = llvm_insertvalue(desc, adaptor.step(), rewriter.getI64ArrayAttr(2));
     rewriter.replaceOp(op, desc);
     return matchSuccess();
   }
@@ -249,8 +246,8 @@ class SliceOpConversion : public LLVMOpLowering {
       Value indexing = adaptor.indexings()[i];
       Value min = indexing;
       if (sliceOp.indexing(i).getType().isa<RangeType>())
-        min = extractvalue(int64Ty, indexing, pos(0));
-      baseOffset = add(baseOffset, mul(min, strides[i]));
+        min = llvm_extractvalue(int64Ty, indexing, pos(0));
+      baseOffset = llvm_add(baseOffset, llvm_mul(min, strides[i]));
     }
 
     // Insert the base and aligned pointers.
@@ -264,8 +261,8 @@ class SliceOpConversion : public LLVMOpLowering {
     if (sliceOp.getShapedType().getRank() == 0)
       return rewriter.replaceOp(op, {desc}), matchSuccess();
 
-    Value zero =
-        constant(int64Ty, rewriter.getIntegerAttr(rewriter.getIndexType(), 0));
+    Value zero = llvm_constant(
+        int64Ty, rewriter.getIntegerAttr(rewriter.getIndexType(), 0));
     // Compute and insert view sizes (max - min along the range) and strides.
     // Skip the non-range operands as they will be projected away from the view.
     int numNewDims = 0;
@@ -274,19 +271,19 @@ class SliceOpConversion : public LLVMOpLowering {
       if (indexing.getType().isa<RangeType>()) {
         int rank = en.index();
         Value rangeDescriptor = adaptor.indexings()[rank];
-        Value min = extractvalue(int64Ty, rangeDescriptor, pos(0));
-        Value max = extractvalue(int64Ty, rangeDescriptor, pos(1));
-        Value step = extractvalue(int64Ty, rangeDescriptor, pos(2));
+        Value min = llvm_extractvalue(int64Ty, rangeDescriptor, pos(0));
+        Value max = llvm_extractvalue(int64Ty, rangeDescriptor, pos(1));
+        Value step = llvm_extractvalue(int64Ty, rangeDescriptor, pos(2));
         Value baseSize = baseDesc.size(rank);
 
         // Bound upper by base view upper bound.
         max = llvm_select(llvm_icmp(ICmpPredicate::slt, max, baseSize), max,
                           baseSize);
-        Value size = sub(max, min);
+        Value size = llvm_sub(max, min);
         // Bound lower by zero.
         size =
             llvm_select(llvm_icmp(ICmpPredicate::slt, size, zero), zero, size);
-        Value stride = mul(strides[rank], step);
+        Value stride = llvm_mul(strides[rank], step);
         desc.setSize(numNewDims, size);
         desc.setStride(numNewDims, stride);
         ++numNewDims;
@@ -450,8 +447,7 @@ class LinalgOpConversion : public OpRewritePattern<LinalgOp> {
 
 /// Conversion pattern specialization for CopyOp. This kicks in when both input
 /// and output permutations are left unspecified or are the identity.
-template <>
-class LinalgOpConversion<CopyOp> : public OpRewritePattern<CopyOp> {
+template <> class LinalgOpConversion<CopyOp> : public OpRewritePattern<CopyOp> {
 public:
   using OpRewritePattern<CopyOp>::OpRewritePattern;
 

diff  --git a/mlir/lib/Conversion/VectorToLoops/ConvertVectorToLoops.cpp b/mlir/lib/Conversion/VectorToLoops/ConvertVectorToLoops.cpp
index 3b709f601ae3..9414b275e96d 100644
--- a/mlir/lib/Conversion/VectorToLoops/ConvertVectorToLoops.cpp
+++ b/mlir/lib/Conversion/VectorToLoops/ConvertVectorToLoops.cpp
@@ -13,9 +13,10 @@
 #include <type_traits>
 
 #include "mlir/Conversion/VectorToLoops/ConvertVectorToLoops.h"
+#include "mlir/Dialect/AffineOps/EDSC/Intrinsics.h"
+#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/Dialect/VectorOps/VectorOps.h"
-#include "mlir/EDSC/Builders.h"
-#include "mlir/EDSC/Helpers.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineMap.h"
 #include "mlir/IR/Attributes.h"
@@ -27,17 +28,19 @@
 #include "mlir/IR/Types.h"
 
 using namespace mlir;
+using namespace mlir::edsc;
+using namespace mlir::edsc::intrinsics;
 using vector::TransferReadOp;
 using vector::TransferWriteOp;
 
 /// Analyzes the `transfer` to find an access dimension along the fastest remote
 /// MemRef dimension. If such a dimension with coalescing properties is found,
-/// `pivs` and `vectorView` are swapped so that the invocation of
+/// `pivs` and `vectorBoundsCapture` are swapped so that the invocation of
 /// LoopNestBuilder captures it in the innermost loop.
 template <typename TransferOpTy>
 static void coalesceCopy(TransferOpTy transfer,
-                         SmallVectorImpl<edsc::ValueHandle *> *pivs,
-                         edsc::VectorView *vectorView) {
+                         SmallVectorImpl<ValueHandle *> *pivs,
+                         VectorBoundsCapture *vectorBoundsCapture) {
   // rank of the remote memory access, coalescing behavior occurs on the
   // innermost memory dimension.
   auto remoteRank = transfer.getMemRefType().getRank();
@@ -61,25 +64,22 @@ static void coalesceCopy(TransferOpTy transfer,
   }
   if (coalescedIdx >= 0) {
     std::swap(pivs->back(), (*pivs)[coalescedIdx]);
-    vectorView->swapRanges(pivs->size() - 1, coalescedIdx);
+    vectorBoundsCapture->swapRanges(pivs->size() - 1, coalescedIdx);
   }
 }
 
 /// Emits remote memory accesses that are clipped to the boundaries of the
 /// MemRef.
 template <typename TransferOpTy>
-static SmallVector<edsc::ValueHandle, 8> clip(TransferOpTy transfer,
-                                              edsc::MemRefView &view,
-                                              ArrayRef<edsc::IndexHandle> ivs) {
+static SmallVector<ValueHandle, 8> clip(TransferOpTy transfer,
+                                        MemRefBoundsCapture &bounds,
+                                        ArrayRef<ValueHandle> ivs) {
   using namespace mlir::edsc;
-  using namespace edsc::op;
-  using edsc::intrinsics::select;
-
-  IndexHandle zero(index_type(0)), one(index_type(1));
-  SmallVector<edsc::ValueHandle, 8> memRefAccess(transfer.indices());
-  SmallVector<edsc::ValueHandle, 8> clippedScalarAccessExprs(
-      memRefAccess.size(), edsc::IndexHandle());
 
+  ValueHandle zero(std_constant_index(0)), one(std_constant_index(1));
+  SmallVector<ValueHandle, 8> memRefAccess(transfer.indices());
+  auto clippedScalarAccessExprs =
+      ValueHandle::makeIndexHandles(memRefAccess.size());
   // Indices accessing to remote memory are clipped and their expressions are
   // returned in clippedScalarAccessExprs.
   for (unsigned memRefDim = 0; memRefDim < clippedScalarAccessExprs.size();
@@ -103,19 +103,21 @@ static SmallVector<edsc::ValueHandle, 8> clip(TransferOpTy transfer,
     // We cannot distinguish atm between unrolled dimensions that implement
     // the "always full" tile abstraction and need clipping from the other
     // ones. So we conservatively clip everything.
-    auto N = view.ub(memRefDim);
+    using namespace edsc::op;
+    auto N = bounds.ub(memRefDim);
     auto i = memRefAccess[memRefDim];
     if (loopIndex < 0) {
       auto N_minus_1 = N - one;
-      auto select_1 = select(i < N, i, N_minus_1);
-      clippedScalarAccessExprs[memRefDim] = select(i < zero, zero, select_1);
+      auto select_1 = std_select(i < N, i, N_minus_1);
+      clippedScalarAccessExprs[memRefDim] =
+          std_select(i < zero, zero, select_1);
     } else {
       auto ii = ivs[loopIndex];
       auto i_plus_ii = i + ii;
       auto N_minus_1 = N - one;
-      auto select_1 = select(i_plus_ii < N, i_plus_ii, N_minus_1);
+      auto select_1 = std_select(i_plus_ii < N, i_plus_ii, N_minus_1);
       clippedScalarAccessExprs[memRefDim] =
-          select(i_plus_ii < zero, zero, select_1);
+          std_select(i_plus_ii < zero, zero, select_1);
     }
   }
 
@@ -165,9 +167,9 @@ using vector_type_cast = edsc::intrinsics::ValueBuilder<vector::TypeCastOp>;
 ///
 /// ```mlir-dsc
 ///    auto condMax = i + ii < N;
-///    auto max = select(condMax, i + ii, N - one)
+///    auto max = std_select(condMax, i + ii, N - one)
 ///    auto cond = i + ii < zero;
-///    select(cond, zero, max);
+///    std_select(cond, zero, max);
 /// ```
 ///
 /// In the future, clipping should not be the only way and instead we should
@@ -246,41 +248,37 @@ struct VectorTransferRewriter : public RewritePattern {
 template <>
 PatternMatchResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
     Operation *op, PatternRewriter &rewriter) const {
-  using namespace mlir::edsc;
   using namespace mlir::edsc::op;
-  using namespace mlir::edsc::intrinsics;
-  using IndexedValue =
-      TemplatedIndexedValue<intrinsics::std_load, intrinsics::std_store>;
 
   TransferReadOp transfer = cast<TransferReadOp>(op);
 
   // 1. Setup all the captures.
   ScopedContext scope(rewriter, transfer.getLoc());
-  IndexedValue remote(transfer.memref());
-  MemRefView view(transfer.memref());
-  VectorView vectorView(transfer.vector());
-  SmallVector<IndexHandle, 8> ivs = makeIndexHandles(vectorView.rank());
+  StdIndexedValue remote(transfer.memref());
+  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
+  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
+  auto ivs = ValueHandle::makeIndexHandles(vectorBoundsCapture.rank());
   SmallVector<ValueHandle *, 8> pivs =
-      makeHandlePointers(MutableArrayRef<IndexHandle>(ivs));
-  coalesceCopy(transfer, &pivs, &vectorView);
+      makeHandlePointers(MutableArrayRef<ValueHandle>(ivs));
+  coalesceCopy(transfer, &pivs, &vectorBoundsCapture);
 
-  auto lbs = vectorView.getLbs();
-  auto ubs = vectorView.getUbs();
+  auto lbs = vectorBoundsCapture.getLbs();
+  auto ubs = vectorBoundsCapture.getUbs();
   SmallVector<ValueHandle, 8> steps;
-  steps.reserve(vectorView.getSteps().size());
-  for (auto step : vectorView.getSteps())
-    steps.push_back(constant_index(step));
+  steps.reserve(vectorBoundsCapture.getSteps().size());
+  for (auto step : vectorBoundsCapture.getSteps())
+    steps.push_back(std_constant_index(step));
 
   // 2. Emit alloc-copy-load-dealloc.
-  ValueHandle tmp = alloc(tmpMemRefType(transfer));
-  IndexedValue local(tmp);
+  ValueHandle tmp = std_alloc(tmpMemRefType(transfer));
+  StdIndexedValue local(tmp);
   ValueHandle vec = vector_type_cast(tmp);
   LoopNestBuilder(pivs, lbs, ubs, steps)([&] {
     // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
-    local(ivs) = remote(clip(transfer, view, ivs));
+    local(ivs) = remote(clip(transfer, memRefBoundsCapture, ivs));
   });
   ValueHandle vectorValue = std_load(vec);
-  (dealloc(tmp)); // vexing parse
+  (std_dealloc(tmp)); // vexing parse
 
   // 3. Propagate.
   rewriter.replaceOp(op, vectorValue.getValue());
@@ -308,42 +306,38 @@ PatternMatchResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
 template <>
 PatternMatchResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
     Operation *op, PatternRewriter &rewriter) const {
-  using namespace mlir::edsc;
-  using namespace mlir::edsc::op;
-  using namespace mlir::edsc::intrinsics;
-  using IndexedValue =
-      TemplatedIndexedValue<intrinsics::std_load, intrinsics::std_store>;
+  using namespace edsc::op;
 
   TransferWriteOp transfer = cast<TransferWriteOp>(op);
 
   // 1. Setup all the captures.
   ScopedContext scope(rewriter, transfer.getLoc());
-  IndexedValue remote(transfer.memref());
-  MemRefView view(transfer.memref());
+  StdIndexedValue remote(transfer.memref());
+  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
   ValueHandle vectorValue(transfer.vector());
-  VectorView vectorView(transfer.vector());
-  SmallVector<IndexHandle, 8> ivs = makeIndexHandles(vectorView.rank());
+  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
+  auto ivs = ValueHandle::makeIndexHandles(vectorBoundsCapture.rank());
   SmallVector<ValueHandle *, 8> pivs =
-      makeHandlePointers(MutableArrayRef<IndexHandle>(ivs));
-  coalesceCopy(transfer, &pivs, &vectorView);
+      makeHandlePointers(MutableArrayRef<ValueHandle>(ivs));
+  coalesceCopy(transfer, &pivs, &vectorBoundsCapture);
 
-  auto lbs = vectorView.getLbs();
-  auto ubs = vectorView.getUbs();
+  auto lbs = vectorBoundsCapture.getLbs();
+  auto ubs = vectorBoundsCapture.getUbs();
   SmallVector<ValueHandle, 8> steps;
-  steps.reserve(vectorView.getSteps().size());
-  for (auto step : vectorView.getSteps())
-    steps.push_back(constant_index(step));
+  steps.reserve(vectorBoundsCapture.getSteps().size());
+  for (auto step : vectorBoundsCapture.getSteps())
+    steps.push_back(std_constant_index(step));
 
   // 2. Emit alloc-store-copy-dealloc.
-  ValueHandle tmp = alloc(tmpMemRefType(transfer));
-  IndexedValue local(tmp);
+  ValueHandle tmp = std_alloc(tmpMemRefType(transfer));
+  StdIndexedValue local(tmp);
   ValueHandle vec = vector_type_cast(tmp);
   std_store(vectorValue, vec);
   LoopNestBuilder(pivs, lbs, ubs, steps)([&] {
     // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
-    remote(clip(transfer, view, ivs)) = local(ivs);
+    remote(clip(transfer, memRefBoundsCapture, ivs)) = local(ivs);
   });
-  (dealloc(tmp)); // vexing parse...
+  (std_dealloc(tmp)); // vexing parse...
 
   rewriter.eraseOp(op);
   return matchSuccess();

diff  --git a/mlir/lib/Dialect/AffineOps/CMakeLists.txt b/mlir/lib/Dialect/AffineOps/CMakeLists.txt
index f2913df36c9c..9648f27f1067 100644
--- a/mlir/lib/Dialect/AffineOps/CMakeLists.txt
+++ b/mlir/lib/Dialect/AffineOps/CMakeLists.txt
@@ -2,13 +2,22 @@ add_llvm_library(MLIRAffineOps
   AffineOps.cpp
   AffineValueMap.cpp
   DialectRegistration.cpp
-
+  EDSC/Builders.cpp
+  
   ADDITIONAL_HEADER_DIRS
   ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/AffineOps
   )
 add_dependencies(MLIRAffineOps
+
   MLIRAffineOpsIncGen
+  MLIREDSC
   MLIRIR
   MLIRLoopLikeInterfaceIncGen
-  MLIRStandardOps)
-target_link_libraries(MLIRAffineOps MLIRIR MLIRStandardOps)
+  MLIRStandardOps
+  )
+target_link_libraries(MLIRAffineOps
+
+  MLIREDSC
+  MLIRIR
+  MLIRStandardOps
+  )

diff  --git a/mlir/lib/Dialect/AffineOps/EDSC/Builders.cpp b/mlir/lib/Dialect/AffineOps/EDSC/Builders.cpp
new file mode 100644
index 000000000000..88f363302f84
--- /dev/null
+++ b/mlir/lib/Dialect/AffineOps/EDSC/Builders.cpp
@@ -0,0 +1,286 @@
+//===- Builders.cpp - MLIR Declarative Builder Classes --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/AffineOps/EDSC/Builders.h"
+#include "mlir/Dialect/StandardOps/EDSC/Builders.h"
+#include "mlir/IR/AffineExpr.h"
+#include "mlir/IR/AffineMap.h"
+
+using namespace mlir;
+using namespace mlir::edsc;
+
+static Optional<ValueHandle> emitStaticFor(ArrayRef<ValueHandle> lbs,
+                                           ArrayRef<ValueHandle> ubs,
+                                           int64_t step) {
+  if (lbs.size() != 1 || ubs.size() != 1)
+    return Optional<ValueHandle>();
+
+  auto *lbDef = lbs.front().getValue().getDefiningOp();
+  auto *ubDef = ubs.front().getValue().getDefiningOp();
+  if (!lbDef || !ubDef)
+    return Optional<ValueHandle>();
+
+  auto lbConst = dyn_cast<ConstantIndexOp>(lbDef);
+  auto ubConst = dyn_cast<ConstantIndexOp>(ubDef);
+  if (!lbConst || !ubConst)
+    return Optional<ValueHandle>();
+
+  return ValueHandle(ScopedContext::getBuilder()
+                         .create<AffineForOp>(ScopedContext::getLocation(),
+                                              lbConst.getValue(),
+                                              ubConst.getValue(), step)
+                         .getInductionVar());
+}
+
+LoopBuilder mlir::edsc::makeAffineLoopBuilder(ValueHandle *iv,
+                                              ArrayRef<ValueHandle> lbHandles,
+                                              ArrayRef<ValueHandle> ubHandles,
+                                              int64_t step) {
+  mlir::edsc::LoopBuilder result;
+  if (auto staticFor = emitStaticFor(lbHandles, ubHandles, step)) {
+    *iv = staticFor.getValue();
+  } else {
+    SmallVector<Value, 4> lbs(lbHandles.begin(), lbHandles.end());
+    SmallVector<Value, 4> ubs(ubHandles.begin(), ubHandles.end());
+    auto b = ScopedContext::getBuilder();
+    *iv = ValueHandle(
+        b.create<AffineForOp>(ScopedContext::getLocation(), lbs,
+                              b.getMultiDimIdentityMap(lbs.size()), ubs,
+                              b.getMultiDimIdentityMap(ubs.size()), step)
+            .getInductionVar());
+  }
+  auto *body = getForInductionVarOwner(iv->getValue()).getBody();
+  result.enter(body, /*prev=*/1);
+  return result;
+}
+
+mlir::edsc::AffineLoopNestBuilder::AffineLoopNestBuilder(
+    ValueHandle *iv, ArrayRef<ValueHandle> lbs, ArrayRef<ValueHandle> ubs,
+    int64_t step) {
+  loops.emplace_back(makeAffineLoopBuilder(iv, lbs, ubs, step));
+}
+
+mlir::edsc::AffineLoopNestBuilder::AffineLoopNestBuilder(
+    ArrayRef<ValueHandle *> ivs, ArrayRef<ValueHandle> lbs,
+    ArrayRef<ValueHandle> ubs, ArrayRef<int64_t> steps) {
+  assert(ivs.size() == lbs.size() && "Mismatch in number of arguments");
+  assert(ivs.size() == ubs.size() && "Mismatch in number of arguments");
+  assert(ivs.size() == steps.size() && "Mismatch in number of arguments");
+  for (auto it : llvm::zip(ivs, lbs, ubs, steps))
+    loops.emplace_back(makeAffineLoopBuilder(std::get<0>(it), std::get<1>(it),
+                                             std::get<2>(it), std::get<3>(it)));
+}
+
+void mlir::edsc::AffineLoopNestBuilder::operator()(
+    function_ref<void(void)> fun) {
+  if (fun)
+    fun();
+  // Iterate by calling operator() on all the loops in the nest.
+  // The iteration order is from innermost to outermost because enter/exit needs
+  // to be asymmetric (i.e. enter() occurs on LoopBuilder construction, exit()
+  // occurs on calling operator()). The asymmetry is required for properly
+  // nesting imperfectly nested regions (see LoopBuilder::operator()).
+  for (auto lit = loops.rbegin(), eit = loops.rend(); lit != eit; ++lit)
+    (*lit)();
+}
+
+template <typename Op>
+static ValueHandle createBinaryHandle(ValueHandle lhs, ValueHandle rhs) {
+  return ValueHandle::create<Op>(lhs.getValue(), rhs.getValue());
+}
+
+static std::pair<AffineExpr, Value>
+categorizeValueByAffineType(MLIRContext *context, Value val, unsigned &numDims,
+                            unsigned &numSymbols) {
+  AffineExpr d;
+  Value resultVal = nullptr;
+  if (auto constant = dyn_cast_or_null<ConstantIndexOp>(val.getDefiningOp())) {
+    d = getAffineConstantExpr(constant.getValue(), context);
+  } else if (isValidSymbol(val) && !isValidDim(val)) {
+    d = getAffineSymbolExpr(numSymbols++, context);
+    resultVal = val;
+  } else {
+    d = getAffineDimExpr(numDims++, context);
+    resultVal = val;
+  }
+  return std::make_pair(d, resultVal);
+}
+
+static ValueHandle createBinaryIndexHandle(
+    ValueHandle lhs, ValueHandle rhs,
+    function_ref<AffineExpr(AffineExpr, AffineExpr)> affCombiner) {
+  MLIRContext *context = ScopedContext::getContext();
+  unsigned numDims = 0, numSymbols = 0;
+  AffineExpr d0, d1;
+  Value v0, v1;
+  std::tie(d0, v0) =
+      categorizeValueByAffineType(context, lhs.getValue(), numDims, numSymbols);
+  std::tie(d1, v1) =
+      categorizeValueByAffineType(context, rhs.getValue(), numDims, numSymbols);
+  SmallVector<Value, 2> operands;
+  if (v0) {
+    operands.push_back(v0);
+  }
+  if (v1) {
+    operands.push_back(v1);
+  }
+  auto map = AffineMap::get(numDims, numSymbols, {affCombiner(d0, d1)});
+  // TODO: createOrFold when available.
+  Operation *op =
+      makeComposedAffineApply(ScopedContext::getBuilder(),
+                              ScopedContext::getLocation(), map, operands)
+          .getOperation();
+  assert(op->getNumResults() == 1 && "Expected single result AffineApply");
+  return ValueHandle(op->getResult(0));
+}
+
+template <typename IOp, typename FOp>
+static ValueHandle createBinaryHandle(
+    ValueHandle lhs, ValueHandle rhs,
+    function_ref<AffineExpr(AffineExpr, AffineExpr)> affCombiner) {
+  auto thisType = lhs.getValue().getType();
+  auto thatType = rhs.getValue().getType();
+  assert(thisType == thatType && "cannot mix types in operators");
+  (void)thisType;
+  (void)thatType;
+  if (thisType.isIndex()) {
+    return createBinaryIndexHandle(lhs, rhs, affCombiner);
+  } else if (thisType.isa<IntegerType>()) {
+    return createBinaryHandle<IOp>(lhs, rhs);
+  } else if (thisType.isa<FloatType>()) {
+    return createBinaryHandle<FOp>(lhs, rhs);
+  } else if (thisType.isa<VectorType>() || thisType.isa<TensorType>()) {
+    auto aggregateType = thisType.cast<ShapedType>();
+    if (aggregateType.getElementType().isa<IntegerType>())
+      return createBinaryHandle<IOp>(lhs, rhs);
+    else if (aggregateType.getElementType().isa<FloatType>())
+      return createBinaryHandle<FOp>(lhs, rhs);
+  }
+  llvm_unreachable("failed to create a ValueHandle");
+}
+
+ValueHandle mlir::edsc::op::operator+(ValueHandle lhs, ValueHandle rhs) {
+  return createBinaryHandle<AddIOp, AddFOp>(
+      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0 + d1; });
+}
+
+ValueHandle mlir::edsc::op::operator-(ValueHandle lhs, ValueHandle rhs) {
+  return createBinaryHandle<SubIOp, SubFOp>(
+      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0 - d1; });
+}
+
+ValueHandle mlir::edsc::op::operator*(ValueHandle lhs, ValueHandle rhs) {
+  return createBinaryHandle<MulIOp, MulFOp>(
+      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0 * d1; });
+}
+
+ValueHandle mlir::edsc::op::operator/(ValueHandle lhs, ValueHandle rhs) {
+  return createBinaryHandle<SignedDivIOp, DivFOp>(
+      lhs, rhs, [](AffineExpr d0, AffineExpr d1) -> AffineExpr {
+        llvm_unreachable("only exprs of non-index type support operator/");
+      });
+}
+
+ValueHandle mlir::edsc::op::operator%(ValueHandle lhs, ValueHandle rhs) {
+  return createBinaryHandle<SignedRemIOp, RemFOp>(
+      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0 % d1; });
+}
+
+ValueHandle mlir::edsc::op::floorDiv(ValueHandle lhs, ValueHandle rhs) {
+  return createBinaryIndexHandle(
+      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0.floorDiv(d1); });
+}
+
+ValueHandle mlir::edsc::op::ceilDiv(ValueHandle lhs, ValueHandle rhs) {
+  return createBinaryIndexHandle(
+      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0.ceilDiv(d1); });
+}
+
+ValueHandle mlir::edsc::op::operator!(ValueHandle value) {
+  assert(value.getType().isInteger(1) && "expected boolean expression");
+  return ValueHandle::create<ConstantIntOp>(1, 1) - value;
+}
+
+ValueHandle mlir::edsc::op::operator&&(ValueHandle lhs, ValueHandle rhs) {
+  assert(lhs.getType().isInteger(1) && "expected boolean expression on LHS");
+  assert(rhs.getType().isInteger(1) && "expected boolean expression on RHS");
+  return lhs * rhs;
+}
+
+ValueHandle mlir::edsc::op::operator||(ValueHandle lhs, ValueHandle rhs) {
+  return !(!lhs && !rhs);
+}
+
+static ValueHandle createIComparisonExpr(CmpIPredicate predicate,
+                                         ValueHandle lhs, ValueHandle rhs) {
+  auto lhsType = lhs.getType();
+  auto rhsType = rhs.getType();
+  (void)lhsType;
+  (void)rhsType;
+  assert(lhsType == rhsType && "cannot mix types in operators");
+  assert((lhsType.isa<IndexType>() || lhsType.isa<IntegerType>()) &&
+         "only integer comparisons are supported");
+
+  auto op = ScopedContext::getBuilder().create<CmpIOp>(
+      ScopedContext::getLocation(), predicate, lhs.getValue(), rhs.getValue());
+  return ValueHandle(op.getResult());
+}
+
+static ValueHandle createFComparisonExpr(CmpFPredicate predicate,
+                                         ValueHandle lhs, ValueHandle rhs) {
+  auto lhsType = lhs.getType();
+  auto rhsType = rhs.getType();
+  (void)lhsType;
+  (void)rhsType;
+  assert(lhsType == rhsType && "cannot mix types in operators");
+  assert(lhsType.isa<FloatType>() && "only float comparisons are supported");
+
+  auto op = ScopedContext::getBuilder().create<CmpFOp>(
+      ScopedContext::getLocation(), predicate, lhs.getValue(), rhs.getValue());
+  return ValueHandle(op.getResult());
+}
+
+// All floating point comparisons are ordered through EDSL
+ValueHandle mlir::edsc::op::operator==(ValueHandle lhs, ValueHandle rhs) {
+  auto type = lhs.getType();
+  return type.isa<FloatType>()
+             ? createFComparisonExpr(CmpFPredicate::OEQ, lhs, rhs)
+             : createIComparisonExpr(CmpIPredicate::eq, lhs, rhs);
+}
+ValueHandle mlir::edsc::op::operator!=(ValueHandle lhs, ValueHandle rhs) {
+  auto type = lhs.getType();
+  return type.isa<FloatType>()
+             ? createFComparisonExpr(CmpFPredicate::ONE, lhs, rhs)
+             : createIComparisonExpr(CmpIPredicate::ne, lhs, rhs);
+}
+ValueHandle mlir::edsc::op::operator<(ValueHandle lhs, ValueHandle rhs) {
+  auto type = lhs.getType();
+  return type.isa<FloatType>()
+             ? createFComparisonExpr(CmpFPredicate::OLT, lhs, rhs)
+             :
+             // TODO(ntv,zinenko): signed by default, how about unsigned?
+             createIComparisonExpr(CmpIPredicate::slt, lhs, rhs);
+}
+ValueHandle mlir::edsc::op::operator<=(ValueHandle lhs, ValueHandle rhs) {
+  auto type = lhs.getType();
+  return type.isa<FloatType>()
+             ? createFComparisonExpr(CmpFPredicate::OLE, lhs, rhs)
+             : createIComparisonExpr(CmpIPredicate::sle, lhs, rhs);
+}
+ValueHandle mlir::edsc::op::operator>(ValueHandle lhs, ValueHandle rhs) {
+  auto type = lhs.getType();
+  return type.isa<FloatType>()
+             ? createFComparisonExpr(CmpFPredicate::OGT, lhs, rhs)
+             : createIComparisonExpr(CmpIPredicate::sgt, lhs, rhs);
+}
+ValueHandle mlir::edsc::op::operator>=(ValueHandle lhs, ValueHandle rhs) {
+  auto type = lhs.getType();
+  return type.isa<FloatType>()
+             ? createFComparisonExpr(CmpFPredicate::OGE, lhs, rhs)
+             : createIComparisonExpr(CmpIPredicate::sge, lhs, rhs);
+}

diff  --git a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
index c8fefd736c8a..b24a925aaad3 100644
--- a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
@@ -13,14 +13,15 @@
 
 #include "mlir/Dialect/GPU/MemoryPromotion.h"
 #include "mlir/Dialect/GPU/GPUDialect.h"
-#include "mlir/Dialect/LoopOps/LoopOps.h"
-#include "mlir/EDSC/Builders.h"
-#include "mlir/EDSC/Helpers.h"
+#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Support/Functional.h"
 #include "mlir/Transforms/LoopUtils.h"
 
 using namespace mlir;
+using namespace mlir::edsc;
+using namespace mlir::edsc::intrinsics;
 using namespace mlir::gpu;
 
 /// Returns the textual name of a GPU dimension.
@@ -41,17 +42,17 @@ static StringRef getDimName(unsigned dim) {
 /// single-iteration loops. Maps the innermost loops to thread dimensions, in
 /// reverse order to enable access coalescing in the innermost loop.
 static void insertCopyLoops(OpBuilder &builder, Location loc,
-                            edsc::MemRefView &bounds, Value from, Value to) {
+                            MemRefBoundsCapture &bounds, Value from, Value to) {
   // Create EDSC handles for bounds.
   unsigned rank = bounds.rank();
-  SmallVector<edsc::ValueHandle, 4> lbs, ubs, steps;
+  SmallVector<ValueHandle, 4> lbs, ubs, steps;
 
   // Make sure we have enough loops to use all thread dimensions, these trivial
   // loops should be outermost and therefore inserted first.
   if (rank < GPUDialect::getNumWorkgroupDimensions()) {
     unsigned extraLoops = GPUDialect::getNumWorkgroupDimensions() - rank;
-    edsc::ValueHandle zero = edsc::intrinsics::constant_index(0);
-    edsc::ValueHandle one = edsc::intrinsics::constant_index(1);
+    ValueHandle zero = std_constant_index(0);
+    ValueHandle one = std_constant_index(1);
     lbs.resize(extraLoops, zero);
     ubs.resize(extraLoops, one);
     steps.resize(extraLoops, one);
@@ -63,9 +64,8 @@ static void insertCopyLoops(OpBuilder &builder, Location loc,
 
   // Emit constant operations for steps.
   steps.reserve(lbs.size());
-  llvm::transform(
-      bounds.getSteps(), std::back_inserter(steps),
-      [](int64_t step) { return edsc::intrinsics::constant_index(step); });
+  llvm::transform(bounds.getSteps(), std::back_inserter(steps),
+                  [](int64_t step) { return std_constant_index(step); });
 
   // Obtain thread identifiers and block sizes, necessary to map to them.
   auto indexType = builder.getIndexType();
@@ -79,12 +79,11 @@ static void insertCopyLoops(OpBuilder &builder, Location loc,
   }
 
   // Produce the loop nest with copies.
-  auto ivs = edsc::makeIndexHandles(lbs.size());
-  auto ivPtrs =
-      edsc::makeHandlePointers(MutableArrayRef<edsc::IndexHandle>(ivs));
-  edsc::LoopNestBuilder(ivPtrs, lbs, ubs, steps)([&]() {
+  SmallVector<ValueHandle, 8> ivs(lbs.size(), ValueHandle(indexType));
+  auto ivPtrs = makeHandlePointers(MutableArrayRef<ValueHandle>(ivs));
+  LoopNestBuilder(ivPtrs, lbs, ubs, steps)([&]() {
     auto activeIvs = llvm::makeArrayRef(ivs).take_back(rank);
-    edsc::StdIndexedValue fromHandle(from), toHandle(to);
+    StdIndexedValue fromHandle(from), toHandle(to);
     toHandle(activeIvs) = fromHandle(activeIvs);
   });
 
@@ -146,14 +145,14 @@ static void insertCopies(Region &region, Location loc, Value from, Value to) {
   OpBuilder builder(region.getContext());
   builder.setInsertionPointToStart(&region.front());
 
-  edsc::ScopedContext edscContext(builder, loc);
-  edsc::MemRefView fromView(from);
-  insertCopyLoops(builder, loc, fromView, from, to);
+  ScopedContext edscContext(builder, loc);
+  MemRefBoundsCapture fromBoundsCapture(from);
+  insertCopyLoops(builder, loc, fromBoundsCapture, from, to);
   builder.create<gpu::BarrierOp>(loc);
 
   builder.setInsertionPoint(&region.front().back());
   builder.create<gpu::BarrierOp>(loc);
-  insertCopyLoops(builder, loc, fromView, to, from);
+  insertCopyLoops(builder, loc, fromBoundsCapture, to, from);
 }
 
 /// Promotes a function argument to workgroup memory in the given function. The

diff  --git a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp b/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
index 7aaf6307a8e5..f2f8e5551522 100644
--- a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
+++ b/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
@@ -6,20 +6,18 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "mlir/Dialect/Linalg/EDSC/Builders.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/Dialect/AffineOps/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
-#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
+#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Utils/StructuredOpsUtils.h"
-#include "mlir/EDSC/Builders.h"
-#include "mlir/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
-#include "mlir/IR/Builders.h"
 #include "mlir/Support/Functional.h"
 
 using namespace mlir;
 using namespace mlir::edsc;
 using namespace mlir::edsc::intrinsics;
-using namespace mlir::edsc::ops;
 using namespace mlir::linalg;
 using namespace mlir::loop;
 
@@ -261,8 +259,8 @@ Operation *mlir::edsc::ops::linalg_pointwise(UnaryPointwiseOpBuilder unaryOp,
 
 Operation *mlir::edsc::ops::linalg_pointwise_tanh(StructuredIndexed I,
                                                   StructuredIndexed O) {
-  using edsc::intrinsics::tanh;
-  UnaryPointwiseOpBuilder unOp([](ValueHandle a) -> Value { return tanh(a); });
+  UnaryPointwiseOpBuilder unOp(
+      [](ValueHandle a) -> Value { return std_tanh(a); });
   return linalg_pointwise(unOp, I, O);
 }
 
@@ -302,9 +300,8 @@ Operation *mlir::edsc::ops::linalg_pointwise_max(StructuredIndexed I1,
                                                  StructuredIndexed I2,
                                                  StructuredIndexed O) {
   BinaryPointwiseOpBuilder binOp([](ValueHandle a, ValueHandle b) -> Value {
-    using edsc::intrinsics::select;
     using edsc::op::operator>;
-    return select(a > b, a, b).getValue();
+    return std_select(a > b, a, b).getValue();
   });
   return linalg_pointwise(binOp, I1, I2, O);
 }

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 0228d6a4ca7b..e48ac4ef28f0 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -12,11 +12,12 @@
 
 #include "mlir/Analysis/Dominance.h"
 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
+#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
 #include "mlir/Dialect/Linalg/Passes.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
-#include "mlir/EDSC/Helpers.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineMap.h"
 #include "mlir/IR/OpImplementation.h"
@@ -36,6 +37,8 @@ using namespace mlir::edsc;
 using namespace mlir::edsc::intrinsics;
 using namespace mlir::linalg;
 
+using folded_std_constant_index = folded::ValueBuilder<ConstantIndexOp>;
+
 using llvm::dbgs;
 
 /// Implements a simple high-level fusion pass of linalg library operations.
@@ -188,9 +191,11 @@ static LinalgOp fuse(Value producedView, LinalgOp producer, LinalgOp consumer,
                  << "existing LoopRange: " << loopRanges[i] << "\n");
     else {
       auto viewDim = getViewDefiningLoopRange(producer, i);
-      loopRanges[i] = SubViewOp::Range{constant_index(folder, 0),
-                                       dim(viewDim.view, viewDim.dimension),
-                                       constant_index(folder, 1)};
+      loopRanges[i] = SubViewOp::Range{
+          folded_std_constant_index(folder, 0),
+          std_dim(viewDim.view, viewDim.dimension),
+          folded_std_constant_index(folder, 1)
+      };
       LLVM_DEBUG(llvm::dbgs() << "new LoopRange: " << loopRanges[i] << "\n");
     }
   }

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
index bc325d6a0289..a160ccd1e5c6 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
@@ -6,16 +6,15 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "mlir/Dialect/Linalg/EDSC/Builders.h"
+#include "mlir/Dialect/AffineOps/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
 #include "mlir/Dialect/Linalg/Passes.h"
 #include "mlir/Dialect/Linalg/Transforms/LinalgTransforms.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
-#include "mlir/Dialect/LoopOps/LoopOps.h"
-#include "mlir/Dialect/StandardOps/Ops.h"
-#include "mlir/EDSC/Helpers.h"
+#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineMap.h"
 #include "mlir/IR/BlockAndValueMapping.h"
@@ -31,9 +30,6 @@ using namespace mlir::edsc;
 using namespace mlir::edsc::intrinsics;
 using namespace mlir::linalg;
 
-using IndexedStdValue = TemplatedIndexedValue<std_load, std_store>;
-using IndexedAffineValue = TemplatedIndexedValue<affine_load, affine_store>;
-
 using edsc::op::operator+;
 using edsc::op::operator==;
 
@@ -77,7 +73,7 @@ SmallVector<Value, 4> emitLoopRanges(OpBuilder &b, Location loc, AffineMap map,
   SmallVector<Value, 4> res;
   for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx) {
     res.push_back(
-        linalg_range(constant_index(0), sizes[idx], constant_index(1)));
+        linalg_range(std_constant_index(0), sizes[idx], std_constant_index(1)));
   }
   return res;
 }
@@ -98,8 +94,8 @@ class LinalgScopedEmitter<IndexedValueType, CopyOp> {
         permuteIvs(allIvs.take_front(nPar), copyOp.inputPermutation());
     auto outputIvs =
         permuteIvs(allIvs.take_front(nPar), copyOp.outputPermutation());
-    SmallVector<IndexHandle, 8> iivs(inputIvs.begin(), inputIvs.end());
-    SmallVector<IndexHandle, 8> oivs(outputIvs.begin(), outputIvs.end());
+    SmallVector<ValueHandle, 8> iivs(inputIvs.begin(), inputIvs.end());
+    SmallVector<ValueHandle, 8> oivs(outputIvs.begin(), outputIvs.end());
     IndexedValueType O(copyOp.getOutputBuffer(0)), I(copyOp.getInput(0));
     // Emit the proper scalar assignment, whether we are dealing with a 0-D or
     // an n-D loop nest; with or without permutations.
@@ -119,7 +115,7 @@ class LinalgScopedEmitter<IndexedValueType, FillOp> {
     auto nPar = fillOp.getNumParallelLoops();
     assert(nPar == allIvs.size());
     auto ivs =
-        SmallVector<IndexHandle, 4>(allIvs.begin(), allIvs.begin() + nPar);
+        SmallVector<ValueHandle, 4>(allIvs.begin(), allIvs.begin() + nPar);
     IndexedValueType O(fillOp.getOutputBuffer(0));
     // Emit the proper scalar assignment, whether we are dealing with a 0-D or
     // an n-D loop nest; with or without permutations.
@@ -135,7 +131,7 @@ class LinalgScopedEmitter<IndexedValueType, DotOp> {
     assert(dotOp.hasBufferSemantics() &&
            "expected linalg op with buffer semantics");
     assert(allIvs.size() == 1);
-    IndexHandle r_i(allIvs[0]);
+    ValueHandle r_i(allIvs[0]);
     IndexedValueType A(dotOp.getInput(0)), B(dotOp.getInput(1)),
         C(dotOp.getOutputBuffer(0));
     // Emit scalar form.
@@ -151,7 +147,7 @@ class LinalgScopedEmitter<IndexedValueType, MatvecOp> {
     assert(matvecOp.hasBufferSemantics() &&
            "expected linalg op with buffer semantics");
     assert(allIvs.size() == 2);
-    IndexHandle i(allIvs[0]), r_j(allIvs[1]);
+    ValueHandle i(allIvs[0]), r_j(allIvs[1]);
     IndexedValueType A(matvecOp.getInput(0)), B(matvecOp.getInput(1)),
         C(matvecOp.getOutputBuffer(0));
     // Emit scalar form.
@@ -167,7 +163,7 @@ class LinalgScopedEmitter<IndexedValueType, MatmulOp> {
     assert(matmulOp.hasBufferSemantics() &&
            "expected linalg op with buffer semantics");
     assert(allIvs.size() == 3);
-    IndexHandle i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]);
+    ValueHandle i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]);
     IndexedValueType A(matmulOp.getInput(0)), B(matmulOp.getInput(1)),
         C(matmulOp.getOutputBuffer(0));
     // Emit scalar form.
@@ -258,7 +254,7 @@ class LinalgScopedEmitter<IndexedValueType, GenericOp> {
     auto funcOp = genericOp.getFunction();
     if (funcOp) {
       // 2. Emit call.
-      Operation *callOp = call(funcOp, indexedValues);
+      Operation *callOp = std_call(funcOp, indexedValues);
       assert(callOp->getNumResults() == genericOp.getNumOutputs());
 
       // 3. Emit std_store.
@@ -359,7 +355,7 @@ class LinalgScopedEmitter<IndexedValueType, IndexedGenericOp> {
 
     if (auto funcOp = indexedGenericOp.getFunction()) {
       // 2. Emit call.
-      Operation *callOp = call(funcOp, indexedValues);
+      Operation *callOp = std_call(funcOp, indexedValues);
       assert(callOp->getNumResults() == indexedGenericOp.getNumOutputs());
 
       // 3. Emit std_store.
@@ -442,15 +438,15 @@ LogicalResult LinalgOpToLoopsImpl<LoopTy, IndexedValueTy, ConcreteOpTy>::doit(
     return success();
   }
 
-  SmallVector<IndexHandle, 4> allIvs(nLoops);
+  SmallVector<ValueHandle, 4> allIvs(nLoops, ValueHandle(b.getIndexType()));
   SmallVector<ValueHandle *, 4> allPIvs =
-      makeHandlePointers(MutableArrayRef<IndexHandle>(allIvs));
+      makeHandlePointers(MutableArrayRef<ValueHandle>(allIvs));
   auto loopRanges = emitLoopRanges(scope.getBuilder(), scope.getLocation(),
                                    invertedMap, getViewSizes(b, linalgOp));
   assert(loopRanges.size() == allIvs.size());
 
   GenericLoopNestRangeBuilder<LoopTy>(allPIvs, loopRanges)([&] {
-    auto allIvValues = extractValues(allIvs);
+    SmallVector<Value, 4> allIvValues(allIvs.begin(), allIvs.end());
     LinalgScopedEmitter<IndexedValueTy, ConcreteOpTy>::emitScalarImplementation(
         allIvValues, linalgOp);
   });
@@ -568,26 +564,26 @@ void LowerLinalgToLoopsPass<LoopType, IndexedValueType>::runOnFunction() {
 
 std::unique_ptr<OpPassBase<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
   return std::make_unique<
-      LowerLinalgToLoopsPass<loop::ForOp, IndexedStdValue>>();
+      LowerLinalgToLoopsPass<loop::ForOp, StdIndexedValue>>();
 }
 
 std::unique_ptr<OpPassBase<FuncOp>>
 mlir::createConvertLinalgToParallelLoopsPass() {
   return std::make_unique<
-      LowerLinalgToLoopsPass<loop::ParallelOp, IndexedStdValue>>();
+      LowerLinalgToLoopsPass<loop::ParallelOp, StdIndexedValue>>();
 }
 
 std::unique_ptr<OpPassBase<FuncOp>>
 mlir::createConvertLinalgToAffineLoopsPass() {
   return std::make_unique<
-      LowerLinalgToLoopsPass<AffineForOp, IndexedAffineValue>>();
+      LowerLinalgToLoopsPass<AffineForOp, AffineIndexedValue>>();
 }
 
 /// Emits a loop nest of `loop.for` with the proper body for `op`.
 template <typename ConcreteOp>
 LogicalResult mlir::linalg::linalgOpToLoops(PatternRewriter &rewriter,
                                             Operation *op) {
-  return LinalgOpToLoopsImpl<loop::ForOp, IndexedStdValue, ConcreteOp>::doit(
+  return LinalgOpToLoopsImpl<loop::ForOp, StdIndexedValue, ConcreteOp>::doit(
       op, rewriter);
 }
 
@@ -595,7 +591,7 @@ LogicalResult mlir::linalg::linalgOpToLoops(PatternRewriter &rewriter,
 template <typename ConcreteOp>
 LogicalResult mlir::linalg::linalgOpToAffineLoops(PatternRewriter &rewriter,
                                                   Operation *op) {
-  return LinalgOpToLoopsImpl<AffineForOp, IndexedAffineValue, ConcreteOp>::doit(
+  return LinalgOpToLoopsImpl<AffineForOp, AffineIndexedValue, ConcreteOp>::doit(
       op, rewriter);
 }
 
@@ -603,7 +599,7 @@ LogicalResult mlir::linalg::linalgOpToAffineLoops(PatternRewriter &rewriter,
 template <typename ConcreteOp>
 LogicalResult mlir::linalg::linalgOpToParallelLoops(PatternRewriter &rewriter,
                                                     Operation *op) {
-  return LinalgOpToLoopsImpl<loop::ParallelOp, IndexedStdValue,
+  return LinalgOpToLoopsImpl<loop::ParallelOp, StdIndexedValue,
                              ConcreteOp>::doit(op, rewriter);
 }
 
@@ -630,18 +626,18 @@ template LogicalResult
 mlir::linalg::linalgOpToParallelLoops<GenericOp>(PatternRewriter &rewriter,
                                                  Operation *op);
 
-static PassRegistration<LowerLinalgToLoopsPass<loop::ForOp, IndexedStdValue>>
+static PassRegistration<LowerLinalgToLoopsPass<loop::ForOp, StdIndexedValue>>
     structuredLoopsPass(
         "convert-linalg-to-loops",
         "Lower the operations from the linalg dialect into loops");
 
 static PassRegistration<
-    LowerLinalgToLoopsPass<loop::ParallelOp, IndexedStdValue>>
+    LowerLinalgToLoopsPass<loop::ParallelOp, StdIndexedValue>>
     parallelLoopsPass(
         "convert-linalg-to-parallel-loops",
         "Lower the operations from the linalg dialect into parallel loops");
 
-static PassRegistration<LowerLinalgToLoopsPass<AffineForOp, IndexedAffineValue>>
+static PassRegistration<LowerLinalgToLoopsPass<AffineForOp, AffineIndexedValue>>
     affineLoopsPass(
         "convert-linalg-to-affine-loops",
         "Lower the operations from the linalg dialect into affine loops");

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
index 618fcfe5c04f..bc39b1aab0ff 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
@@ -14,9 +14,8 @@
 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/Dialect/VectorOps/VectorOps.h"
-#include "mlir/EDSC/Helpers.h"
-#include "mlir/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/Matchers.h"
 #include "mlir/IR/PatternMatch.h"
@@ -191,8 +190,6 @@ LogicalResult mlir::linalg::vectorizeLinalgOpPrecondition(Operation *op) {
 
 SmallVector<Value, 0> mlir::linalg::vectorizeLinalgOp(PatternRewriter &rewriter,
                                                       Operation *op) {
-  using edsc::intrinsics::std_load;
-  using edsc::intrinsics::std_store;
   using vector_contract = edsc::intrinsics::ValueBuilder<vector::ContractionOp>;
   using vector_broadcast = edsc::intrinsics::ValueBuilder<vector::BroadcastOp>;
   using vector_type_cast = edsc::intrinsics::ValueBuilder<vector::TypeCastOp>;

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index cb5cd3c0d41e..e24f7c737bdc 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -10,13 +10,14 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "mlir/Dialect/AffineOps/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
 #include "mlir/Dialect/Linalg/Passes.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
 #include "mlir/Dialect/LoopOps/LoopOps.h"
-#include "mlir/EDSC/Helpers.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineExprVisitor.h"
 #include "mlir/IR/AffineMap.h"
@@ -37,6 +38,9 @@ using namespace mlir::loop;
 
 using llvm::SetVector;
 
+using folded_affine_min = folded::ValueBuilder<AffineMinOp>;
+using folded_linalg_range = folded::ValueBuilder<linalg::RangeOp>;
+
 #define DEBUG_TYPE "linalg-promotion"
 
 static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");
@@ -50,10 +54,10 @@ static Value allocBuffer(Type elementType, Value size, bool dynamicBuffers) {
   auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8);
   if (!dynamicBuffers)
     if (auto cst = dyn_cast_or_null<ConstantIndexOp>(size.getDefiningOp()))
-      return alloc(
+      return std_alloc(
           MemRefType::get(width * cst.getValue(), IntegerType::get(8, ctx)));
-  Value mul = muli(constant_index(width), size);
-  return alloc(MemRefType::get(-1, IntegerType::get(8, ctx)), mul);
+  Value mul = std_muli(std_constant_index(width), size);
+  return std_alloc(MemRefType::get(-1, IntegerType::get(8, ctx)), mul);
 }
 
 // Performs promotion of a `subView` into a local buffer of the size of the
@@ -77,8 +81,8 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
                                            SubViewOp subView,
                                            bool dynamicBuffers,
                                            OperationFolder *folder) {
-  auto zero = constant_index(folder, 0);
-  auto one = constant_index(folder, 1);
+  auto zero = folded_std_constant_index(folder, 0);
+  auto one = folded_std_constant_index(folder, 1);
 
   auto viewType = subView.getType();
   auto rank = viewType.getRank();
@@ -90,15 +94,15 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
     auto rank = en.index();
     auto rangeValue = en.value();
     Value d = rangeValue.size;
-    allocSize = muli(folder, allocSize, d).getValue();
+    allocSize = folded_std_muli(folder, allocSize, d).getValue();
     fullRanges.push_back(d);
     partialRanges.push_back(
-        linalg_range(folder, zero, dim(subView, rank), one));
+        folded_linalg_range(folder, zero, std_dim(subView, rank), one));
   }
   SmallVector<int64_t, 4> dynSizes(fullRanges.size(), -1);
   auto buffer =
       allocBuffer(viewType.getElementType(), allocSize, dynamicBuffers);
-  auto fullLocalView = view(
+  auto fullLocalView = std_view(
       MemRefType::get(dynSizes, viewType.getElementType()), buffer, fullRanges);
   auto partialLocalView = linalg_slice(fullLocalView, partialRanges);
   return PromotionInfo{buffer, fullLocalView, partialLocalView};
@@ -135,7 +139,7 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
     // TODO(ntv): value to fill with should be related to the operation.
     // For now, just use APFloat(0.0f).
     auto t = subView.getType().getElementType().cast<FloatType>();
-    Value fillVal = constant_float(folder, APFloat(0.0f), t);
+    Value fillVal = folded_std_constant_float(folder, APFloat(0.0f), t);
     // TODO(ntv): fill is only necessary if `promotionInfo` has a full local
    // view that is different from the partial local view and we are on the
     // boundary.
@@ -198,7 +202,7 @@ LinalgOp mlir::linalg::promoteSubViewOperands(OpBuilder &b, LinalgOp op,
 
   // 4. Dealloc local buffers.
   for (const auto &pi : promotedBufferAndViews)
-    dealloc(pi.buffer);
+    std_dealloc(pi.buffer);
 
   return res;
 }

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 51565be16572..aae903edef0c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -10,13 +10,13 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "mlir/Dialect/Linalg/EDSC/Builders.h"
-#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
+#include "mlir/Dialect/AffineOps/EDSC/Intrinsics.h"
+#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
 #include "mlir/Dialect/Linalg/Passes.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
-#include "mlir/Dialect/LoopOps/LoopOps.h"
-#include "mlir/EDSC/Helpers.h"
+#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineExprVisitor.h"
 #include "mlir/IR/AffineMap.h"
@@ -34,6 +34,8 @@ using namespace mlir::edsc::intrinsics;
 using namespace mlir::linalg;
 using namespace mlir::loop;
 
+using folded_affine_min = folded::ValueBuilder<AffineMinOp>;
+
 #define DEBUG_TYPE "linalg-tiling"
 
 static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");
@@ -83,8 +85,8 @@ makeTiledLoopRanges(OpBuilder &b, Location loc, AffineMap map,
   // Create a new range with the applied tile sizes.
   SmallVector<SubViewOp::Range, 4> res;
   for (unsigned idx = 0, e = tileSizes.size(); idx < e; ++idx) {
-    res.push_back(SubViewOp::Range{constant_index(folder, 0), viewSizes[idx],
-                                   tileSizes[idx]});
+    res.push_back(SubViewOp::Range{folded_std_constant_index(folder, 0),
+                                   viewSizes[idx], tileSizes[idx]});
   }
   return std::make_tuple(res, loopIndexToRangeIndex);
 }
@@ -239,16 +241,15 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
                            [](Value v) { return !isZero(v); })) &&
          "expected as many ivs as non-zero sizes");
 
-  using edsc::intrinsics::select;
-  using edsc::op::operator+;
-  using edsc::op::operator<;
+  using namespace edsc::op;
 
   // Construct (potentially temporary) mins and maxes on which to apply maps
   // that define tile subviews.
   SmallVector<Value, 8> lbs, subViewSizes;
   for (unsigned idx = 0, idxIvs = 0, e = tileSizes.size(); idx < e; ++idx) {
     bool isTiled = !isZero(tileSizes[idx]);
-    lbs.push_back(isTiled ? ivs[idxIvs++] : (Value)constant_index(folder, 0));
+    lbs.push_back(isTiled ? ivs[idxIvs++]
+                          : (Value)folded_std_constant_index(folder, 0));
     subViewSizes.push_back(isTiled ? tileSizes[idx] : viewSizes[idx]);
   }
 
@@ -276,9 +277,9 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
     strides.reserve(rank);
     for (unsigned r = 0; r < rank; ++r) {
       if (!isTiled(map.getSubMap({r}), tileSizes)) {
-        offsets.push_back(constant_index(folder, 0));
-        sizes.push_back(dim(view, r));
-        strides.push_back(constant_index(folder, 1));
+        offsets.push_back(folded_std_constant_index(folder, 0));
+        sizes.push_back(std_dim(view, r));
+        strides.push_back(folded_std_constant_index(folder, 1));
         continue;
       }
 
@@ -302,13 +303,13 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
             {getAffineDimExpr(/*position=*/0, b.getContext()),
              getAffineDimExpr(/*position=*/1, b.getContext()) -
                  getAffineDimExpr(/*position=*/2, b.getContext())});
-        auto d = dim(folder, view, r);
-        size = affine_min(folder, b.getIndexType(), minMap,
-                          ValueRange{size, d, offset});
+        auto d = folded_std_dim(folder, view, r);
+        size = folded_affine_min(folder, b.getIndexType(), minMap,
+                                 ValueRange{size, d, offset});
       }
 
       sizes.push_back(size);
-      strides.push_back(constant_index(folder, 1));
+      strides.push_back(folded_std_constant_index(folder, 1));
     }
 
     res.push_back(b.create<SubViewOp>(loc, view, offsets, sizes, strides));
@@ -367,8 +368,8 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizes,
 
   // 3. Create the tiled loops.
   LinalgOp res = op;
-  SmallVector<IndexHandle, 4> ivs(loopRanges.size());
-  auto pivs = makeHandlePointers(MutableArrayRef<IndexHandle>(ivs));
+  auto ivs = ValueHandle::makeIndexHandles(loopRanges.size());
+  auto pivs = makeHandlePointers(MutableArrayRef<ValueHandle>(ivs));
   // Convert SubViewOp::Range to linalg_range.
   SmallVector<Value, 4> linalgRanges;
   for (auto &range : loopRanges) {
@@ -434,11 +435,11 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ArrayRef<int64_t> tileSizes,
   SmallVector<Value, 8> tileSizeValues;
   tileSizeValues.reserve(tileSizes.size());
   for (auto ts : tileSizes)
-    tileSizeValues.push_back(constant_index(folder, ts));
+    tileSizeValues.push_back(folded_std_constant_index(folder, ts));
   // Pad tile sizes with zero values to enforce our convention.
   if (tileSizeValues.size() < nLoops) {
     for (unsigned i = tileSizeValues.size(); i < nLoops; ++i)
-      tileSizeValues.push_back(constant_index(folder, 0));
+      tileSizeValues.push_back(folded_std_constant_index(folder, 0));
   }
 
   return tileLinalgOpImpl<LoopTy>(b, op, tileSizeValues, permutation, folder);

diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index cf8e5ff92739..29a4a679fb2f 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -11,11 +11,11 @@
 //===----------------------------------------------------------------------===//
 
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
+#include "mlir/Dialect/AffineOps/AffineOps.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
 #include "mlir/Dialect/LoopOps/LoopOps.h"
 #include "mlir/Dialect/StandardOps/Ops.h"
-#include "mlir/EDSC/Helpers.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineMap.h"
 #include "mlir/IR/Matchers.h"
@@ -25,8 +25,6 @@
 #include "mlir/Transforms/FoldUtils.h"
 
 using namespace mlir;
-using namespace mlir::edsc;
-using namespace mlir::edsc::intrinsics;
 using namespace mlir::linalg;
 using namespace mlir::loop;
 

diff --git a/mlir/lib/Dialect/LoopOps/CMakeLists.txt b/mlir/lib/Dialect/LoopOps/CMakeLists.txt
index 8dcd97616bed..27f50068ec07 100644
--- a/mlir/lib/Dialect/LoopOps/CMakeLists.txt
+++ b/mlir/lib/Dialect/LoopOps/CMakeLists.txt
@@ -1,9 +1,23 @@
 file(GLOB globbed *.c *.cpp)
 add_llvm_library(MLIRLoopOps
   ${globbed}
+  EDSC/Builders.cpp
 
   ADDITIONAL_HEADER_DIRS
   ${MLIR_MAIN_INCLUDE_DIR}/mlir/LoopOps
   )
-add_dependencies(MLIRLoopOps MLIRLoopLikeInterfaceIncGen MLIRLoopOpsIncGen MLIRStandardOps LLVMSupport)
-target_link_libraries(MLIRLoopOps MLIRStandardOps LLVMSupport MLIRIR)
+add_dependencies(MLIRLoopOps
+
+  MLIREDSC
+  MLIRLoopLikeInterfaceIncGen
+  MLIRLoopOpsIncGen
+  MLIRStandardOps
+  LLVMSupport
+  )
+target_link_libraries(MLIRLoopOps
+
+  MLIREDSC
+  MLIRIR
+  MLIRStandardOps
+  LLVMSupport
+  )

diff --git a/mlir/lib/Dialect/LoopOps/EDSC/Builders.cpp b/mlir/lib/Dialect/LoopOps/EDSC/Builders.cpp
new file mode 100644
index 000000000000..ad9e1b74ef4d
--- /dev/null
+++ b/mlir/lib/Dialect/LoopOps/EDSC/Builders.cpp
@@ -0,0 +1,92 @@
+//===- Builders.cpp - MLIR Declarative Builder Classes --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
+#include "mlir/IR/AffineExpr.h"
+#include "mlir/IR/AffineMap.h"
+
+using namespace mlir;
+using namespace mlir::edsc;
+
+mlir::edsc::ParallelLoopNestBuilder::ParallelLoopNestBuilder(
+    ArrayRef<ValueHandle *> ivs, ArrayRef<ValueHandle> lbs,
+    ArrayRef<ValueHandle> ubs, ArrayRef<ValueHandle> steps) {
+  assert(ivs.size() == lbs.size() && "Mismatch in number of arguments");
+  assert(ivs.size() == ubs.size() && "Mismatch in number of arguments");
+  assert(ivs.size() == steps.size() && "Mismatch in number of arguments");
+
+  loops.emplace_back(makeParallelLoopBuilder(ivs, lbs, ubs, steps));
+}
+
+void mlir::edsc::ParallelLoopNestBuilder::operator()(
+    function_ref<void(void)> fun) {
+  if (fun)
+    fun();
+  // Iterate on the calling operator() on all the loops in the nest.
+  // The iteration order is from innermost to outermost because enter/exit needs
+  // to be asymmetric (i.e. enter() occurs on LoopBuilder construction, exit()
+  // occurs on calling operator()). The asymmetry is required for properly
+  // nesting imperfectly nested regions (see LoopBuilder::operator()).
+  for (auto lit = loops.rbegin(), eit = loops.rend(); lit != eit; ++lit)
+    (*lit)();
+}
+
+mlir::edsc::LoopNestBuilder::LoopNestBuilder(ArrayRef<ValueHandle *> ivs,
+                                             ArrayRef<ValueHandle> lbs,
+                                             ArrayRef<ValueHandle> ubs,
+                                             ArrayRef<ValueHandle> steps) {
+  assert(ivs.size() == lbs.size() && "expected size of ivs and lbs to match");
+  assert(ivs.size() == ubs.size() && "expected size of ivs and ubs to match");
+  assert(ivs.size() == steps.size() &&
+         "expected size of ivs and steps to match");
+  loops.reserve(ivs.size());
+  for (auto it : llvm::zip(ivs, lbs, ubs, steps)) {
+    loops.emplace_back(makeLoopBuilder(std::get<0>(it), std::get<1>(it),
+                                       std::get<2>(it), std::get<3>(it)));
+  }
+  assert(loops.size() == ivs.size() && "Mismatch loops vs ivs size");
+}
+
+void mlir::edsc::LoopNestBuilder::LoopNestBuilder::operator()(
+    std::function<void(void)> fun) {
+  if (fun)
+    fun();
+  for (auto &lit : reverse(loops))
+    lit({});
+}
+
+LoopBuilder mlir::edsc::makeParallelLoopBuilder(ArrayRef<ValueHandle *> ivs,
+                                                ArrayRef<ValueHandle> lbHandles,
+                                                ArrayRef<ValueHandle> ubHandles,
+                                                ArrayRef<ValueHandle> steps) {
+  LoopBuilder result;
+  auto opHandle = OperationHandle::create<loop::ParallelOp>(
+      SmallVector<Value, 4>(lbHandles.begin(), lbHandles.end()),
+      SmallVector<Value, 4>(ubHandles.begin(), ubHandles.end()),
+      SmallVector<Value, 4>(steps.begin(), steps.end()));
+
+  loop::ParallelOp parallelOp =
+      cast<loop::ParallelOp>(*opHandle.getOperation());
+  for (size_t i = 0, e = ivs.size(); i < e; ++i)
+    *ivs[i] = ValueHandle(parallelOp.getBody()->getArgument(i));
+  result.enter(parallelOp.getBody(), /*prev=*/1);
+  return result;
+}
+
+mlir::edsc::LoopBuilder mlir::edsc::makeLoopBuilder(ValueHandle *iv,
+                                                    ValueHandle lbHandle,
+                                                    ValueHandle ubHandle,
+                                                    ValueHandle stepHandle) {
+  mlir::edsc::LoopBuilder result;
+  auto forOp =
+      OperationHandle::createOp<loop::ForOp>(lbHandle, ubHandle, stepHandle);
+  *iv = ValueHandle(forOp.getInductionVar());
+  auto *body = loop::getForInductionVarOwner(iv->getValue()).getBody();
+  result.enter(body, /*prev=*/1);
+  return result;
+}

diff --git a/mlir/lib/Dialect/StandardOps/CMakeLists.txt b/mlir/lib/Dialect/StandardOps/CMakeLists.txt
index 87ea1e67606c..be7b69ccbeb0 100644
--- a/mlir/lib/Dialect/StandardOps/CMakeLists.txt
+++ b/mlir/lib/Dialect/StandardOps/CMakeLists.txt
@@ -1,14 +1,23 @@
 file(GLOB globbed *.c *.cpp)
 add_llvm_library(MLIRStandardOps
   ${globbed}
+  EDSC/Builders.cpp
+  EDSC/Intrinsics.cpp
 
   ADDITIONAL_HEADER_DIRS
   ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/StandardOps
   )
 add_dependencies(MLIRStandardOps
+
   MLIRCallOpInterfacesIncGen
+  MLIREDSC
+  MLIRIR
   MLIRStandardOpsIncGen
+  LLVMSupport
+  )
+target_link_libraries(MLIRStandardOps
+
+  MLIREDSC
   MLIRIR
   LLVMSupport
   )
-target_link_libraries(MLIRStandardOps MLIRIR LLVMSupport)

diff --git a/mlir/lib/EDSC/Helpers.cpp b/mlir/lib/Dialect/StandardOps/EDSC/Builders.cpp
similarity index 59%
rename from mlir/lib/EDSC/Helpers.cpp
rename to mlir/lib/Dialect/StandardOps/EDSC/Builders.cpp
index 79decddda6e1..1232e51341a1 100644
--- a/mlir/lib/EDSC/Helpers.cpp
+++ b/mlir/lib/Dialect/StandardOps/EDSC/Builders.cpp
@@ -1,4 +1,4 @@
-//===- Helpers.cpp - MLIR Declarative Helper Functionality ----------------===//
+//===- Builders.cpp - MLIR Declarative Builder Classes --------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,12 +6,13 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "mlir/EDSC/Helpers.h"
-#include "mlir/Dialect/StandardOps/Ops.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
+#include "mlir/IR/AffineMap.h"
 
 using namespace mlir;
 using namespace mlir::edsc;
+using namespace mlir::edsc::intrinsics;
 
 static SmallVector<ValueHandle, 8> getMemRefSizes(Value memRef) {
   MemRefType memRefType = memRef.getType().cast<MemRefType>();
@@ -21,32 +22,28 @@ static SmallVector<ValueHandle, 8> getMemRefSizes(Value memRef) {
   res.reserve(memRefType.getShape().size());
   const auto &shape = memRefType.getShape();
   for (unsigned idx = 0, n = shape.size(); idx < n; ++idx) {
-    if (shape[idx] == -1) {
-      res.push_back(ValueHandle::create<DimOp>(memRef, idx));
-    } else {
-      res.push_back(static_cast<index_type>(shape[idx]));
-    }
+    if (shape[idx] == -1)
+      res.push_back(std_dim(memRef, idx));
+    else
+      res.push_back(std_constant_index(shape[idx]));
   }
   return res;
 }
 
-mlir::edsc::MemRefView::MemRefView(Value v) : base(v) {
-  assert(v.getType().isa<MemRefType>() && "MemRefType expected");
-
+mlir::edsc::MemRefBoundsCapture::MemRefBoundsCapture(Value v) : base(v) {
   auto memrefSizeValues = getMemRefSizes(v);
-  for (auto &size : memrefSizeValues) {
-    lbs.push_back(static_cast<index_type>(0));
-    ubs.push_back(size);
+  for (auto s : memrefSizeValues) {
+    lbs.push_back(std_constant_index(0));
+    ubs.push_back(s);
     steps.push_back(1);
   }
 }
 
-mlir::edsc::VectorView::VectorView(Value v) : base(v) {
+mlir::edsc::VectorBoundsCapture::VectorBoundsCapture(Value v) : base(v) {
   auto vectorType = v.getType().cast<VectorType>();
-
   for (auto s : vectorType.getShape()) {
-    lbs.push_back(static_cast<index_type>(0));
-    ubs.push_back(static_cast<index_type>(s));
+    lbs.push_back(std_constant_index(0));
+    ubs.push_back(std_constant_index(s));
     steps.push_back(1);
   }
 }

diff --git a/mlir/lib/EDSC/Intrinsics.cpp b/mlir/lib/Dialect/StandardOps/EDSC/Intrinsics.cpp
similarity index 98%
rename from mlir/lib/EDSC/Intrinsics.cpp
rename to mlir/lib/Dialect/StandardOps/EDSC/Intrinsics.cpp
index 20d732941c43..466ee049515f 100644
--- a/mlir/lib/EDSC/Intrinsics.cpp
+++ b/mlir/lib/Dialect/StandardOps/EDSC/Intrinsics.cpp
@@ -6,8 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "mlir/EDSC/Intrinsics.h"
-#include "mlir/EDSC/Builders.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
 
 using namespace mlir;

diff --git a/mlir/lib/EDSC/Builders.cpp b/mlir/lib/EDSC/Builders.cpp
index f6bd916b3461..919990a067e9 100644
--- a/mlir/lib/EDSC/Builders.cpp
+++ b/mlir/lib/EDSC/Builders.cpp
@@ -7,8 +7,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "mlir/EDSC/Builders.h"
-#include "mlir/Dialect/StandardOps/Ops.h"
 #include "mlir/IR/AffineExpr.h"
+#include "mlir/IR/AffineMap.h"
 
 #include "llvm/ADT/Optional.h"
 
@@ -65,13 +65,6 @@ MLIRContext *mlir::edsc::ScopedContext::getContext() {
   return getBuilder().getContext();
 }
 
-mlir::edsc::ValueHandle::ValueHandle(index_type cst) {
-  auto &b = ScopedContext::getBuilder();
-  auto loc = ScopedContext::getLocation();
-  v = b.create<ConstantIndexOp>(loc, cst.v).getResult();
-  t = v.getType();
-}
-
 ValueHandle &mlir::edsc::ValueHandle::operator=(const ValueHandle &other) {
   assert(t == other.t && "Wrong type capture");
   assert(!v && "ValueHandle has already been captured, use a new name!");
@@ -79,28 +72,13 @@ ValueHandle &mlir::edsc::ValueHandle::operator=(const ValueHandle &other) {
   return *this;
 }
 
-ValueHandle
-mlir::edsc::ValueHandle::createComposedAffineApply(AffineMap map,
-                                                   ArrayRef<Value> operands) {
-  Operation *op =
-      makeComposedAffineApply(ScopedContext::getBuilder(),
-                              ScopedContext::getLocation(), map, operands)
-          .getOperation();
-  assert(op->getNumResults() == 1 && "Not a single result AffineApply");
-  return ValueHandle(op->getResult(0));
-}
-
 ValueHandle ValueHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
                                 ArrayRef<Type> resultTypes,
                                 ArrayRef<NamedAttribute> attributes) {
   Operation *op =
       OperationHandle::create(name, operands, resultTypes, attributes);
-  if (op->getNumResults() == 1) {
+  if (op->getNumResults() == 1)
     return ValueHandle(op->getResult(0));
-  }
-  if (auto f = dyn_cast<AffineForOp>(op)) {
-    return ValueHandle(f.getInductionVar());
-  }
   llvm_unreachable("unsupported operation, use an OperationHandle instead");
 }
 
@@ -149,75 +127,6 @@ BlockHandle mlir::edsc::BlockHandle::createInRegion(Region &region,
   return res;
 }
 
-static Optional<ValueHandle> emitStaticFor(ArrayRef<ValueHandle> lbs,
-                                           ArrayRef<ValueHandle> ubs,
-                                           int64_t step) {
-  if (lbs.size() != 1 || ubs.size() != 1)
-    return Optional<ValueHandle>();
-
-  auto *lbDef = lbs.front().getValue().getDefiningOp();
-  auto *ubDef = ubs.front().getValue().getDefiningOp();
-  if (!lbDef || !ubDef)
-    return Optional<ValueHandle>();
-
-  auto lbConst = dyn_cast<ConstantIndexOp>(lbDef);
-  auto ubConst = dyn_cast<ConstantIndexOp>(ubDef);
-  if (!lbConst || !ubConst)
-    return Optional<ValueHandle>();
-
-  return ValueHandle::create<AffineForOp>(lbConst.getValue(),
-                                          ubConst.getValue(), step);
-}
-
-mlir::edsc::LoopBuilder mlir::edsc::LoopBuilder::makeAffine(
-    ValueHandle *iv, ArrayRef<ValueHandle> lbHandles,
-    ArrayRef<ValueHandle> ubHandles, int64_t step) {
-  mlir::edsc::LoopBuilder result;
-  if (auto staticFor = emitStaticFor(lbHandles, ubHandles, step)) {
-    *iv = staticFor.getValue();
-  } else {
-    SmallVector<Value, 4> lbs(lbHandles.begin(), lbHandles.end());
-    SmallVector<Value, 4> ubs(ubHandles.begin(), ubHandles.end());
-    *iv = ValueHandle::create<AffineForOp>(
-        lbs, ScopedContext::getBuilder().getMultiDimIdentityMap(lbs.size()),
-        ubs, ScopedContext::getBuilder().getMultiDimIdentityMap(ubs.size()),
-        step);
-  }
-  auto *body = getForInductionVarOwner(iv->getValue()).getBody();
-  result.enter(body, /*prev=*/1);
-  return result;
-}
-
-mlir::edsc::LoopBuilder mlir::edsc::LoopBuilder::makeParallel(
-    ArrayRef<ValueHandle *> ivs, ArrayRef<ValueHandle> lbHandles,
-    ArrayRef<ValueHandle> ubHandles, ArrayRef<ValueHandle> steps) {
-  mlir::edsc::LoopBuilder result;
-  auto opHandle = OperationHandle::create<loop::ParallelOp>(
-      SmallVector<Value, 4>(lbHandles.begin(), lbHandles.end()),
-      SmallVector<Value, 4>(ubHandles.begin(), ubHandles.end()),
-      SmallVector<Value, 4>(steps.begin(), steps.end()));
-
-  loop::ParallelOp parallelOp =
-      cast<loop::ParallelOp>(*opHandle.getOperation());
-  for (size_t i = 0, e = ivs.size(); i < e; ++i)
-    *ivs[i] = ValueHandle(parallelOp.getBody()->getArgument(i));
-  result.enter(parallelOp.getBody(), /*prev=*/1);
-  return result;
-}
-
-mlir::edsc::LoopBuilder
-mlir::edsc::LoopBuilder::makeLoop(ValueHandle *iv, ValueHandle lbHandle,
-                                  ValueHandle ubHandle,
-                                  ValueHandle stepHandle) {
-  mlir::edsc::LoopBuilder result;
-  auto forOp =
-      OperationHandle::createOp<loop::ForOp>(lbHandle, ubHandle, stepHandle);
-  *iv = ValueHandle(forOp.getInductionVar());
-  auto *body = loop::getForInductionVarOwner(iv->getValue()).getBody();
-  result.enter(body, /*prev=*/1);
-  return result;
-}
-
 void mlir::edsc::LoopBuilder::operator()(function_ref<void(void)> fun) {
   // Call to `exit` must be explicit and asymmetric (cannot happen in the
   // destructor) because of ordering wrt comma operator.
@@ -242,83 +151,6 @@ void mlir::edsc::LoopBuilder::operator()(function_ref<void(void)> fun) {
   exit();
 }
 
-mlir::edsc::AffineLoopNestBuilder::AffineLoopNestBuilder(
-    ValueHandle *iv, ArrayRef<ValueHandle> lbs, ArrayRef<ValueHandle> ubs,
-    int64_t step) {
-  loops.emplace_back(LoopBuilder::makeAffine(iv, lbs, ubs, step));
-}
-
-mlir::edsc::AffineLoopNestBuilder::AffineLoopNestBuilder(
-    ArrayRef<ValueHandle *> ivs, ArrayRef<ValueHandle> lbs,
-    ArrayRef<ValueHandle> ubs, ArrayRef<int64_t> steps) {
-  assert(ivs.size() == lbs.size() && "Mismatch in number of arguments");
-  assert(ivs.size() == ubs.size() && "Mismatch in number of arguments");
-  assert(ivs.size() == steps.size() && "Mismatch in number of arguments");
-  for (auto it : llvm::zip(ivs, lbs, ubs, steps))
-    loops.emplace_back(LoopBuilder::makeAffine(
-        std::get<0>(it), std::get<1>(it), std::get<2>(it), std::get<3>(it)));
-}
-
-void mlir::edsc::AffineLoopNestBuilder::operator()(
-    function_ref<void(void)> fun) {
-  if (fun)
-    fun();
-  // Iterate on the calling operator() on all the loops in the nest.
-  // The iteration order is from innermost to outermost because enter/exit needs
-  // to be asymmetric (i.e. enter() occurs on LoopBuilder construction, exit()
-  // occurs on calling operator()). The asymmetry is required for properly
-  // nesting imperfectly nested regions (see LoopBuilder::operator()).
-  for (auto lit = loops.rbegin(), eit = loops.rend(); lit != eit; ++lit)
-    (*lit)();
-}
-
-mlir::edsc::ParallelLoopNestBuilder::ParallelLoopNestBuilder(
-    ArrayRef<ValueHandle *> ivs, ArrayRef<ValueHandle> lbs,
-    ArrayRef<ValueHandle> ubs, ArrayRef<ValueHandle> steps) {
-  assert(ivs.size() == lbs.size() && "Mismatch in number of arguments");
-  assert(ivs.size() == ubs.size() && "Mismatch in number of arguments");
-  assert(ivs.size() == steps.size() && "Mismatch in number of arguments");
-
-  loops.emplace_back(LoopBuilder::makeParallel(ivs, lbs, ubs, steps));
-}
-
-void mlir::edsc::ParallelLoopNestBuilder::operator()(
-    function_ref<void(void)> fun) {
-  if (fun)
-    fun();
-  // Iterate on the calling operator() on all the loops in the nest.
-  // The iteration order is from innermost to outermost because enter/exit needs
-  // to be asymmetric (i.e. enter() occurs on LoopBuilder construction, exit()
-  // occurs on calling operator()). The asymmetry is required for properly
-  // nesting imperfectly nested regions (see LoopBuilder::operator()).
-  for (auto lit = loops.rbegin(), eit = loops.rend(); lit != eit; ++lit)
-    (*lit)();
-}
-
-mlir::edsc::LoopNestBuilder::LoopNestBuilder(ArrayRef<ValueHandle *> ivs,
-                                             ArrayRef<ValueHandle> lbs,
-                                             ArrayRef<ValueHandle> ubs,
-                                             ArrayRef<ValueHandle> steps) {
-  assert(ivs.size() == lbs.size() && "expected size of ivs and lbs to match");
-  assert(ivs.size() == ubs.size() && "expected size of ivs and ubs to match");
-  assert(ivs.size() == steps.size() &&
-         "expected size of ivs and steps to match");
-  loops.reserve(ivs.size());
-  for (auto it : llvm::zip(ivs, lbs, ubs, steps)) {
-    loops.emplace_back(LoopBuilder::makeLoop(std::get<0>(it), std::get<1>(it),
-                                             std::get<2>(it), std::get<3>(it)));
-  }
-  assert(loops.size() == ivs.size() && "Mismatch loops vs ivs size");
-}
-
-void LoopNestBuilder::LoopNestBuilder::operator()(
-    std::function<void(void)> fun) {
-  if (fun)
-    fun();
-  for (auto &lit : reverse(loops))
-    lit({});
-}
-
 mlir::edsc::BlockBuilder::BlockBuilder(BlockHandle bh, Append) {
   assert(bh && "Expected already captured BlockHandle");
   enter(bh.getBlock());
@@ -367,194 +199,3 @@ void mlir::edsc::BlockBuilder::operator()(function_ref<void(void)> fun) {
     fun();
   exit();
 }
-
-template <typename Op>
-static ValueHandle createBinaryHandle(ValueHandle lhs, ValueHandle rhs) {
-  return ValueHandle::create<Op>(lhs.getValue(), rhs.getValue());
-}
-
-static std::pair<AffineExpr, Value>
-categorizeValueByAffineType(MLIRContext *context, Value val, unsigned &numDims,
-                            unsigned &numSymbols) {
-  AffineExpr d;
-  Value resultVal = nullptr;
-  if (auto constant = dyn_cast_or_null<ConstantIndexOp>(val.getDefiningOp())) {
-    d = getAffineConstantExpr(constant.getValue(), context);
-  } else if (isValidSymbol(val) && !isValidDim(val)) {
-    d = getAffineSymbolExpr(numSymbols++, context);
-    resultVal = val;
-  } else {
-    d = getAffineDimExpr(numDims++, context);
-    resultVal = val;
-  }
-  return std::make_pair(d, resultVal);
-}
-
-static ValueHandle createBinaryIndexHandle(
-    ValueHandle lhs, ValueHandle rhs,
-    function_ref<AffineExpr(AffineExpr, AffineExpr)> affCombiner) {
-  MLIRContext *context = ScopedContext::getContext();
-  unsigned numDims = 0, numSymbols = 0;
-  AffineExpr d0, d1;
-  Value v0, v1;
-  std::tie(d0, v0) =
-      categorizeValueByAffineType(context, lhs.getValue(), numDims, numSymbols);
-  std::tie(d1, v1) =
-      categorizeValueByAffineType(context, rhs.getValue(), numDims, numSymbols);
-  SmallVector<Value, 2> operands;
-  if (v0) {
-    operands.push_back(v0);
-  }
-  if (v1) {
-    operands.push_back(v1);
-  }
-  auto map = AffineMap::get(numDims, numSymbols, {affCombiner(d0, d1)});
-  // TODO: createOrFold when available.
-  return ValueHandle::createComposedAffineApply(map, operands);
-}
-
-template <typename IOp, typename FOp>
-static ValueHandle createBinaryHandle(
-    ValueHandle lhs, ValueHandle rhs,
-    function_ref<AffineExpr(AffineExpr, AffineExpr)> affCombiner) {
-  auto thisType = lhs.getValue().getType();
-  auto thatType = rhs.getValue().getType();
-  assert(thisType == thatType && "cannot mix types in operators");
-  (void)thisType;
-  (void)thatType;
-  if (thisType.isIndex()) {
-    return createBinaryIndexHandle(lhs, rhs, affCombiner);
-  } else if (thisType.isa<IntegerType>()) {
-    return createBinaryHandle<IOp>(lhs, rhs);
-  } else if (thisType.isa<FloatType>()) {
-    return createBinaryHandle<FOp>(lhs, rhs);
-  } else if (thisType.isa<VectorType>() || thisType.isa<TensorType>()) {
-    auto aggregateType = thisType.cast<ShapedType>();
-    if (aggregateType.getElementType().isa<IntegerType>())
-      return createBinaryHandle<IOp>(lhs, rhs);
-    else if (aggregateType.getElementType().isa<FloatType>())
-      return createBinaryHandle<FOp>(lhs, rhs);
-  }
-  llvm_unreachable("failed to create a ValueHandle");
-}
-
-ValueHandle mlir::edsc::op::operator+(ValueHandle lhs, ValueHandle rhs) {
-  return createBinaryHandle<AddIOp, AddFOp>(
-      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0 + d1; });
-}
-
-ValueHandle mlir::edsc::op::operator-(ValueHandle lhs, ValueHandle rhs) {
-  return createBinaryHandle<SubIOp, SubFOp>(
-      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0 - d1; });
-}
-
-ValueHandle mlir::edsc::op::operator*(ValueHandle lhs, ValueHandle rhs) {
-  return createBinaryHandle<MulIOp, MulFOp>(
-      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0 * d1; });
-}
-
-ValueHandle mlir::edsc::op::operator/(ValueHandle lhs, ValueHandle rhs) {
-  return createBinaryHandle<SignedDivIOp, DivFOp>(
-      lhs, rhs, [](AffineExpr d0, AffineExpr d1) -> AffineExpr {
-        llvm_unreachable("only exprs of non-index type support operator/");
-      });
-}
-
-ValueHandle mlir::edsc::op::operator%(ValueHandle lhs, ValueHandle rhs) {
-  return createBinaryHandle<SignedRemIOp, RemFOp>(
-      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0 % d1; });
-}
-
-ValueHandle mlir::edsc::op::floorDiv(ValueHandle lhs, ValueHandle rhs) {
-  return createBinaryIndexHandle(
-      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0.floorDiv(d1); });
-}
-
-ValueHandle mlir::edsc::op::ceilDiv(ValueHandle lhs, ValueHandle rhs) {
-  return createBinaryIndexHandle(
-      lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0.ceilDiv(d1); });
-}
-
-ValueHandle mlir::edsc::op::operator!(ValueHandle value) {
-  assert(value.getType().isInteger(1) && "expected boolean expression");
-  return ValueHandle::create<ConstantIntOp>(1, 1) - value;
-}
-
-ValueHandle mlir::edsc::op::operator&&(ValueHandle lhs, ValueHandle rhs) {
-  assert(lhs.getType().isInteger(1) && "expected boolean expression on LHS");
-  assert(rhs.getType().isInteger(1) && "expected boolean expression on RHS");
-  return lhs * rhs;
-}
-
-ValueHandle mlir::edsc::op::operator||(ValueHandle lhs, ValueHandle rhs) {
-  return !(!lhs && !rhs);
-}
-
-static ValueHandle createIComparisonExpr(CmpIPredicate predicate,
-                                         ValueHandle lhs, ValueHandle rhs) {
-  auto lhsType = lhs.getType();
-  auto rhsType = rhs.getType();
-  (void)lhsType;
-  (void)rhsType;
-  assert(lhsType == rhsType && "cannot mix types in operators");
-  assert((lhsType.isa<IndexType>() || lhsType.isa<IntegerType>()) &&
-         "only integer comparisons are supported");
-
-  auto op = ScopedContext::getBuilder().create<CmpIOp>(
-      ScopedContext::getLocation(), predicate, lhs.getValue(), rhs.getValue());
-  return ValueHandle(op.getResult());
-}
-
-static ValueHandle createFComparisonExpr(CmpFPredicate predicate,
-                                         ValueHandle lhs, ValueHandle rhs) {
-  auto lhsType = lhs.getType();
-  auto rhsType = rhs.getType();
-  (void)lhsType;
-  (void)rhsType;
-  assert(lhsType == rhsType && "cannot mix types in operators");
-  assert(lhsType.isa<FloatType>() && "only float comparisons are supported");
-
-  auto op = ScopedContext::getBuilder().create<CmpFOp>(
-      ScopedContext::getLocation(), predicate, lhs.getValue(), rhs.getValue());
-  return ValueHandle(op.getResult());
-}
-
-// All floating point comparison are ordered through EDSL
-ValueHandle mlir::edsc::op::operator==(ValueHandle lhs, ValueHandle rhs) {
-  auto type = lhs.getType();
-  return type.isa<FloatType>()
-             ? createFComparisonExpr(CmpFPredicate::OEQ, lhs, rhs)
-             : createIComparisonExpr(CmpIPredicate::eq, lhs, rhs);
-}
-ValueHandle mlir::edsc::op::operator!=(ValueHandle lhs, ValueHandle rhs) {
-  auto type = lhs.getType();
-  return type.isa<FloatType>()
-             ? createFComparisonExpr(CmpFPredicate::ONE, lhs, rhs)
-             : createIComparisonExpr(CmpIPredicate::ne, lhs, rhs);
-}
-ValueHandle mlir::edsc::op::operator<(ValueHandle lhs, ValueHandle rhs) {
-  auto type = lhs.getType();
-  return type.isa<FloatType>()
-             ? createFComparisonExpr(CmpFPredicate::OLT, lhs, rhs)
-             :
-             // TODO(ntv,zinenko): signed by default, how about unsigned?
-             createIComparisonExpr(CmpIPredicate::slt, lhs, rhs);
-}
-ValueHandle mlir::edsc::op::operator<=(ValueHandle lhs, ValueHandle rhs) {
-  auto type = lhs.getType();
-  return type.isa<FloatType>()
-             ? createFComparisonExpr(CmpFPredicate::OLE, lhs, rhs)
-             : createIComparisonExpr(CmpIPredicate::sle, lhs, rhs);
-}
-ValueHandle mlir::edsc::op::operator>(ValueHandle lhs, ValueHandle rhs) {
-  auto type = lhs.getType();
-  return type.isa<FloatType>()
-             ? createFComparisonExpr(CmpFPredicate::OGT, lhs, rhs)
-             : createIComparisonExpr(CmpIPredicate::sgt, lhs, rhs);
-}
-ValueHandle mlir::edsc::op::operator>=(ValueHandle lhs, ValueHandle rhs) {
-  auto type = lhs.getType();
-  return type.isa<FloatType>()
-             ? createFComparisonExpr(CmpFPredicate::OGE, lhs, rhs)
-             : createIComparisonExpr(CmpIPredicate::sge, lhs, rhs);
-}

diff  --git a/mlir/lib/EDSC/CMakeLists.txt b/mlir/lib/EDSC/CMakeLists.txt
index 0c43bb7d6167..533d7ec84a40 100644
--- a/mlir/lib/EDSC/CMakeLists.txt
+++ b/mlir/lib/EDSC/CMakeLists.txt
@@ -1,27 +1,19 @@
 set(LLVM_OPTIONAL_SOURCES
   Builders.cpp
   CoreAPIs.cpp
-  Helpers.cpp
-  Intrinsics.cpp
   )
 
 add_llvm_library(MLIREDSC
   Builders.cpp
-  Helpers.cpp
-  Intrinsics.cpp
 
   ADDITIONAL_HEADER_DIRS
   ${MLIR_MAIN_INCLUDE_DIR}/mlir/EDSC
   )
 target_link_libraries(MLIREDSC
   PUBLIC
-  MLIRAffineOps
+
   MLIRIR
-  MLIRLoopOps
-  MLIRStandardOps
   MLIRSupport
-  MLIRTransformUtils
-  MLIRVectorOps
   )
 
 add_llvm_library(MLIREDSCInterface

diff  --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
index 2ad3f0ca0371..68b7759b036b 100644
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -8,13 +8,11 @@
 
 // RUN: mlir-edsc-builder-api-test | FileCheck %s
 
-#include "mlir/Dialect/AffineOps/AffineOps.h"
-#include "mlir/Dialect/Linalg/EDSC/Builders.h"
+#include "mlir/Dialect/AffineOps/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
-#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
-#include "mlir/Dialect/StandardOps/Ops.h"
+#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/EDSC/Builders.h"
-#include "mlir/EDSC/Helpers.h"
 #include "mlir/EDSC/Intrinsics.h"
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/Builders.h"
@@ -34,6 +32,8 @@
 #include "llvm/Support/raw_ostream.h"
 
 using namespace mlir;
+using namespace mlir::edsc;
+using namespace mlir::edsc::intrinsics;
 
 static MLIRContext &globalContext() {
   static thread_local MLIRContext context;
@@ -50,9 +50,6 @@ static FuncOp makeFunction(StringRef name, ArrayRef<Type> results = {},
 }
 
 TEST_FUNC(builder_dynamic_for_func_args) {
-  using namespace edsc;
-  using namespace edsc::op;
-  using namespace edsc::intrinsics;
   auto indexType = IndexType::get(&globalContext());
   auto f32Type = FloatType::getF32(&globalContext());
   auto f =
@@ -62,16 +59,18 @@ TEST_FUNC(builder_dynamic_for_func_args) {
   ScopedContext scope(builder, f.getLoc());
   ValueHandle i(indexType), j(indexType), lb(f.getArgument(0)),
       ub(f.getArgument(1));
-  ValueHandle f7(constant_float(llvm::APFloat(7.0f), f32Type));
-  ValueHandle f13(constant_float(llvm::APFloat(13.0f), f32Type));
-  ValueHandle i7(constant_int(7, 32));
-  ValueHandle i13(constant_int(13, 32));
+  ValueHandle f7(std_constant_float(llvm::APFloat(7.0f), f32Type));
+  ValueHandle f13(std_constant_float(llvm::APFloat(13.0f), f32Type));
+  ValueHandle i7(std_constant_int(7, 32));
+  ValueHandle i13(std_constant_int(13, 32));
   AffineLoopNestBuilder(&i, lb, ub, 3)([&] {
-    lb *index_type(3) + ub;
-    lb + index_type(3);
+    using namespace edsc::op;
+    lb *std_constant_index(3) + ub;
+    lb + std_constant_index(3);
     AffineLoopNestBuilder(&j, lb, ub, 2)([&] {
-      ceilDiv(index_type(31) * floorDiv(i + j * index_type(3), index_type(32)),
-              index_type(32));
+      ceilDiv(std_constant_index(31) * floorDiv(i + j * std_constant_index(3),
+                                                std_constant_index(32)),
+              std_constant_index(32));
       ((f7 + f13) / f7) % f13 - f7 *f13;
       ((i7 + i13) / i7) % i13 - i7 *i13;
     });
@@ -103,9 +102,6 @@ TEST_FUNC(builder_dynamic_for_func_args) {
 }
 
 TEST_FUNC(builder_dynamic_for) {
-  using namespace edsc;
-  using namespace edsc::op;
-  using namespace edsc::intrinsics;
   auto indexType = IndexType::get(&globalContext());
   auto f = makeFunction("builder_dynamic_for", {},
                         {indexType, indexType, indexType, indexType});
@@ -114,6 +110,7 @@ TEST_FUNC(builder_dynamic_for) {
   ScopedContext scope(builder, f.getLoc());
   ValueHandle i(indexType), a(f.getArgument(0)), b(f.getArgument(1)),
       c(f.getArgument(2)), d(f.getArgument(3));
+  using namespace edsc::op;
   AffineLoopNestBuilder(&i, a - b, c + d, 2)();
 
   // clang-format off
@@ -127,9 +124,6 @@ TEST_FUNC(builder_dynamic_for) {
 }
 
 TEST_FUNC(builder_loop_for) {
-  using namespace edsc;
-  using namespace edsc::op;
-  using namespace edsc::intrinsics;
   auto indexType = IndexType::get(&globalContext());
   auto f = makeFunction("builder_loop_for", {},
                         {indexType, indexType, indexType, indexType});
@@ -138,6 +132,7 @@ TEST_FUNC(builder_loop_for) {
   ScopedContext scope(builder, f.getLoc());
   ValueHandle i(indexType), a(f.getArgument(0)), b(f.getArgument(1)),
       c(f.getArgument(2)), d(f.getArgument(3));
+  using namespace edsc::op;
   LoopNestBuilder(&i, a - b, c + d, a)();
 
   // clang-format off
@@ -151,9 +146,6 @@ TEST_FUNC(builder_loop_for) {
 }
 
 TEST_FUNC(builder_max_min_for) {
-  using namespace edsc;
-  using namespace edsc::op;
-  using namespace edsc::intrinsics;
   auto indexType = IndexType::get(&globalContext());
   auto f = makeFunction("builder_max_min_for", {},
                         {indexType, indexType, indexType, indexType});
@@ -163,7 +155,7 @@ TEST_FUNC(builder_max_min_for) {
   ValueHandle i(indexType), lb1(f.getArgument(0)), lb2(f.getArgument(1)),
       ub1(f.getArgument(2)), ub2(f.getArgument(3));
   AffineLoopNestBuilder(&i, {lb1, lb2}, {ub1, ub2}, 1)();
-  ret();
+  std_ret();
 
   // clang-format off
   // CHECK-LABEL: func @builder_max_min_for(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
@@ -175,8 +167,6 @@ TEST_FUNC(builder_max_min_for) {
 }
 
 TEST_FUNC(builder_blocks) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
   auto f = makeFunction("builder_blocks");
 
@@ -220,8 +210,6 @@ TEST_FUNC(builder_blocks) {
 }
 
 TEST_FUNC(builder_blocks_eager) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
   auto f = makeFunction("builder_blocks_eager");
 
@@ -264,8 +252,6 @@ TEST_FUNC(builder_blocks_eager) {
 }
 
 TEST_FUNC(builder_cond_branch) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   auto f = makeFunction("builder_cond_branch", {},
                         {IntegerType::get(1, &globalContext())});
 
@@ -278,8 +264,8 @@ TEST_FUNC(builder_cond_branch) {
   ValueHandle arg1(c32.getType()), arg2(c64.getType()), arg3(c32.getType());
 
   BlockHandle b1, b2, functionBlock(&f.front());
-  BlockBuilder(&b1, {&arg1})([&] { ret(); });
-  BlockBuilder(&b2, {&arg2, &arg3})([&] { ret(); });
+  BlockBuilder(&b1, {&arg1})([&] { std_ret(); });
+  BlockBuilder(&b2, {&arg2, &arg3})([&] { std_ret(); });
   // Get back to entry block and add a conditional branch
   BlockBuilder(functionBlock, Append())([&] {
     cond_br(funcArg, b1, {c32}, b2, {c64, c42});
@@ -301,8 +287,6 @@ TEST_FUNC(builder_cond_branch) {
 }
 
 TEST_FUNC(builder_cond_branch_eager) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
   auto f = makeFunction("builder_cond_branch_eager", {},
                         {IntegerType::get(1, &globalContext())});
@@ -319,10 +303,10 @@ TEST_FUNC(builder_cond_branch_eager) {
   BlockHandle b1, b2;
   cond_br(funcArg, &b1, {&arg1}, {c32}, &b2, {&arg2, &arg3}, {c64, c42});
   BlockBuilder(b1, Append())([]{
-      ret();
+      std_ret();
   });
   BlockBuilder(b2, Append())([]{
-      ret();
+      std_ret();
   });
 
   // CHECK-LABEL: @builder_cond_branch_eager
@@ -340,9 +324,8 @@ TEST_FUNC(builder_cond_branch_eager) {
 }
 
 TEST_FUNC(builder_helpers) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
+  auto indexType = IndexType::get(&globalContext());
   auto f32Type = FloatType::getF32(&globalContext());
   auto memrefType =
       MemRefType::get({ShapedType::kDynamicSize, ShapedType::kDynamicSize,
@@ -356,10 +339,12 @@ TEST_FUNC(builder_helpers) {
   // clang-format off
   ValueHandle f7(
       ValueHandle::create<ConstantFloatOp>(llvm::APFloat(7.0f), f32Type));
-  MemRefView vA(f.getArgument(0)), vB(f.getArgument(1)),
+  MemRefBoundsCapture vA(f.getArgument(0)), vB(f.getArgument(1)),
       vC(f.getArgument(2));
-  IndexedValue A(f.getArgument(0)), B(f.getArgument(1)), C(f.getArgument(2));
-  IndexHandle i, j, k1, k2, lb0, lb1, lb2, ub0, ub1, ub2;
+  AffineIndexedValue A(f.getArgument(0)), B(f.getArgument(1)), C(f.getArgument(2));
+  ValueHandle i(indexType), j(indexType), k1(indexType), k2(indexType),
+      lb0(indexType), lb1(indexType), lb2(indexType),
+      ub0(indexType), ub1(indexType), ub2(indexType);
   int64_t step0, step1, step2;
   std::tie(lb0, ub0, step0) = vA.range(0);
   std::tie(lb1, ub1, step1) = vA.range(1);
@@ -398,8 +383,6 @@ TEST_FUNC(builder_helpers) {
 }
 
 TEST_FUNC(custom_ops) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
   auto indexType = IndexType::get(&globalContext());
   auto f = makeFunction("custom_ops", {}, {indexType, indexType});
@@ -413,8 +396,9 @@ TEST_FUNC(custom_ops) {
   // clang-format off
   ValueHandle vh(indexType), vh20(indexType), vh21(indexType);
   OperationHandle ih0, ih2;
-  IndexHandle m, n, M(f.getArgument(0)), N(f.getArgument(1));
-  IndexHandle ten(index_type(10)), twenty(index_type(20));
+  ValueHandle m(indexType), n(indexType);
+  ValueHandle M(f.getArgument(0)), N(f.getArgument(1));
+  ValueHandle ten(std_constant_index(10)), twenty(std_constant_index(20));
   AffineLoopNestBuilder({&m, &n}, {M, N}, {M + ten, N + twenty}, {1, 1})([&]{
     vh = MY_CUSTOM_OP({m, m + n}, {indexType}, {});
     ih0 = MY_CUSTOM_OP_0({m, m + n}, {});
@@ -438,8 +422,6 @@ TEST_FUNC(custom_ops) {
 }
 
 TEST_FUNC(insertion_in_block) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
   auto indexType = IndexType::get(&globalContext());
   auto f = makeFunction("insertion_in_block", {}, {indexType, indexType});
@@ -463,23 +445,22 @@ TEST_FUNC(insertion_in_block) {
   f.erase();
 }
 
-TEST_FUNC(zero_and_sign_extendi_op_i1_to_i8) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
+TEST_FUNC(zero_and_std_sign_extendi_op_i1_to_i8) {
   using namespace edsc::op;
   auto i1Type = IntegerType::get(1, &globalContext());
   auto i8Type = IntegerType::get(8, &globalContext());
   auto memrefType = MemRefType::get({}, i1Type, {}, 0);
-  auto f = makeFunction("zero_and_sign_extendi_op", {}, {memrefType, memrefType});
+  auto f = makeFunction("zero_and_std_sign_extendi_op", {},
+                        {memrefType, memrefType});
 
   OpBuilder builder(f.getBody());
   ScopedContext scope(builder, f.getLoc());
-  IndexedValue A(f.getArgument(0));
-  IndexedValue B(f.getArgument(1));
+  AffineIndexedValue A(f.getArgument(0));
+  AffineIndexedValue B(f.getArgument(1));
   // clang-format off
-  edsc::intrinsics::zero_extendi(*A, i8Type);
-  edsc::intrinsics::sign_extendi(*B, i8Type);
-  // CHECK-LABEL: @zero_and_sign_extendi_op
+  edsc::intrinsics::std_zero_extendi(*A, i8Type);
+  edsc::intrinsics::std_sign_extendi(*B, i8Type);
+  // CHECK-LABEL: @zero_and_std_sign_extendi_op
   //      CHECK:     %[[SRC1:.*]] = affine.load
   //      CHECK:     zexti %[[SRC1]] : i1 to i8
   //      CHECK:     %[[SRC2:.*]] = affine.load
@@ -490,9 +471,8 @@ TEST_FUNC(zero_and_sign_extendi_op_i1_to_i8) {
 }
 
 TEST_FUNC(select_op_i32) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
+  auto indexType = IndexType::get(&globalContext());
   auto f32Type = FloatType::getF32(&globalContext());
   auto memrefType = MemRefType::get(
       {ShapedType::kDynamicSize, ShapedType::kDynamicSize}, f32Type, {}, 0);
@@ -501,16 +481,17 @@ TEST_FUNC(select_op_i32) {
   OpBuilder builder(f.getBody());
   ScopedContext scope(builder, f.getLoc());
   // clang-format off
-  ValueHandle zero = constant_index(0), one = constant_index(1);
-  MemRefView vA(f.getArgument(0));
-  IndexedValue A(f.getArgument(0));
-  IndexHandle i, j;
+  ValueHandle zero = std_constant_index(0), one = std_constant_index(1);
+  MemRefBoundsCapture vA(f.getArgument(0));
+  AffineIndexedValue A(f.getArgument(0));
+  ValueHandle i(indexType), j(indexType);
   AffineLoopNestBuilder({&i, &j}, {zero, zero}, {one, one}, {1, 1})([&]{
-    // This test exercises IndexedValue::operator Value.
+    // This test exercises AffineIndexedValue::operator Value.
     // Without it, one must force conversion to ValueHandle as such:
-    //   edsc::intrinsics::select(
+    //   std_select(
     //      i == zero, ValueHandle(A(zero, zero)), ValueHandle(ValueA(i, j)))
-    edsc::intrinsics::select(i == zero, *A(zero, zero), *A(i, j));
+    using edsc::op::operator==;
+    std_select(i == zero, *A(zero, zero), *A(i, j));
   });
 
   // CHECK-LABEL: @select_op
@@ -526,9 +507,7 @@ TEST_FUNC(select_op_i32) {
 }
 
 TEST_FUNC(select_op_f32) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
-  using namespace edsc::op;
+  auto indexType = IndexType::get(&globalContext());
   auto f32Type = FloatType::getF32(&globalContext());
   auto memrefType = MemRefType::get(
       {ShapedType::kDynamicSize, ShapedType::kDynamicSize}, f32Type, {}, 0);
@@ -537,18 +516,18 @@ TEST_FUNC(select_op_f32) {
   OpBuilder builder(f.getBody());
   ScopedContext scope(builder, f.getLoc());
   // clang-format off
-  ValueHandle zero = constant_index(0), one = constant_index(1);
-  MemRefView vA(f.getArgument(0)), vB(f.getArgument(1));
-  IndexedValue A(f.getArgument(0)), B(f.getArgument(1));
-  IndexHandle i, j;
+  ValueHandle zero = std_constant_index(0), one = std_constant_index(1);
+  MemRefBoundsCapture vA(f.getArgument(0)), vB(f.getArgument(1));
+  AffineIndexedValue A(f.getArgument(0)), B(f.getArgument(1));
+  ValueHandle i(indexType), j(indexType);
   AffineLoopNestBuilder({&i, &j}, {zero, zero}, {one, one}, {1, 1})([&]{
-
-    edsc::intrinsics::select(B(i, j) == B(i+one, j), *A(zero, zero), *A(i, j));
-    edsc::intrinsics::select(B(i, j) != B(i+one, j), *A(zero, zero), *A(i, j));
-    edsc::intrinsics::select(B(i, j) >= B(i+one, j), *A(zero, zero), *A(i, j));
-    edsc::intrinsics::select(B(i, j) <= B(i+one, j), *A(zero, zero), *A(i, j));
-    edsc::intrinsics::select(B(i, j) < B(i+one, j), *A(zero, zero), *A(i, j));
-    edsc::intrinsics::select(B(i, j) > B(i+one, j), *A(zero, zero), *A(i, j));
+    using namespace edsc::op;
+    std_select(B(i, j) == B(i + one, j), *A(zero, zero), *A(i, j));
+    std_select(B(i, j) != B(i + one, j), *A(zero, zero), *A(i, j));
+    std_select(B(i, j) >= B(i + one, j), *A(zero, zero), *A(i, j));
+    std_select(B(i, j) <= B(i + one, j), *A(zero, zero), *A(i, j));
+    std_select(B(i, j) < B(i + one, j), *A(zero, zero), *A(i, j));
+    std_select(B(i, j) > B(i + one, j), *A(zero, zero), *A(i, j));
   });
 
   // CHECK-LABEL: @select_op
@@ -604,9 +583,7 @@ TEST_FUNC(select_op_f32) {
 // Inject an EDSC-constructed computation to exercise imperfectly nested 2-d
 // tiling.
 TEST_FUNC(tile_2d) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
-  using namespace edsc::op;
+  auto indexType = IndexType::get(&globalContext());
   auto memrefType =
       MemRefType::get({ShapedType::kDynamicSize, ShapedType::kDynamicSize,
                        ShapedType::kDynamicSize},
@@ -615,12 +592,16 @@ TEST_FUNC(tile_2d) {
 
   OpBuilder builder(f.getBody());
   ScopedContext scope(builder, f.getLoc());
-  ValueHandle zero = constant_index(0);
-  MemRefView vA(f.getArgument(0)), vB(f.getArgument(1)), vC(f.getArgument(2));
-  IndexedValue A(f.getArgument(0)), B(f.getArgument(1)), C(f.getArgument(2));
-  IndexHandle i, j, k1, k2, M(vC.ub(0)), N(vC.ub(1)), O(vC.ub(2));
+  ValueHandle zero = std_constant_index(0);
+  MemRefBoundsCapture vA(f.getArgument(0)), vB(f.getArgument(1)),
+      vC(f.getArgument(2));
+  AffineIndexedValue A(f.getArgument(0)), B(f.getArgument(1)),
+      C(f.getArgument(2));
+  ValueHandle i(indexType), j(indexType), k1(indexType), k2(indexType);
+  ValueHandle M(vC.ub(0)), N(vC.ub(1)), O(vC.ub(2));
 
   // clang-format off
+  using namespace edsc::op;
   AffineLoopNestBuilder({&i, &j}, {zero, zero}, {M, N}, {1, 1})([&]{
     AffineLoopNestBuilder(&k1, zero, O, 1)([&]{
       C(i, j, k1) = A(i, j, k1) + B(i, j, k1);
@@ -675,8 +656,6 @@ TEST_FUNC(tile_2d) {
 
 // Exercise StdIndexedValue for loads and stores.
 TEST_FUNC(indirect_access) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
   auto memrefType = MemRefType::get({ShapedType::kDynamicSize},
                                     FloatType::getF32(&globalContext()), {}, 0);
@@ -685,11 +664,11 @@ TEST_FUNC(indirect_access) {
 
   OpBuilder builder(f.getBody());
   ScopedContext scope(builder, f.getLoc());
-  ValueHandle zero = constant_index(0);
-  MemRefView vC(f.getArgument(2));
-  IndexedValue B(f.getArgument(1)), D(f.getArgument(3));
+  ValueHandle zero = std_constant_index(0);
+  MemRefBoundsCapture vC(f.getArgument(2));
+  AffineIndexedValue B(f.getArgument(1)), D(f.getArgument(3));
   StdIndexedValue A(f.getArgument(0)), C(f.getArgument(2));
-  IndexHandle i, N(vC.ub(0));
+  ValueHandle i(builder.getIndexType()), N(vC.ub(0));
 
   // clang-format off
   AffineLoopNestBuilder(&i, zero, N, 1)([&]{
@@ -711,8 +690,6 @@ TEST_FUNC(indirect_access) {
 
 // Exercise affine loads and stores build with empty maps.
 TEST_FUNC(empty_map_load_store) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
   auto memrefType =
       MemRefType::get({}, FloatType::getF32(&globalContext()), {}, 0);
@@ -721,10 +698,10 @@ TEST_FUNC(empty_map_load_store) {
 
   OpBuilder builder(f.getBody());
   ScopedContext scope(builder, f.getLoc());
-  ValueHandle zero = constant_index(0);
-  ValueHandle one = constant_index(1);
-  IndexedValue input(f.getArgument(0)), res(f.getArgument(1));
-  IndexHandle iv;
+  ValueHandle zero = std_constant_index(0);
+  ValueHandle one = std_constant_index(1);
+  AffineIndexedValue input(f.getArgument(0)), res(f.getArgument(1));
+  ValueHandle iv(builder.getIndexType());
 
   // clang-format off
   AffineLoopNestBuilder(&iv, zero, one, 1)([&]{
@@ -749,8 +726,6 @@ TEST_FUNC(empty_map_load_store) {
 // CHECK-NEXT: } else {
 // clang-format on
 TEST_FUNC(affine_if_op) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
   using namespace edsc::op;
   auto f32Type = FloatType::getF32(&globalContext());
   auto memrefType = MemRefType::get(
@@ -760,7 +735,7 @@ TEST_FUNC(affine_if_op) {
   OpBuilder builder(f.getBody());
   ScopedContext scope(builder, f.getLoc());
 
-  ValueHandle zero = constant_index(0), ten = constant_index(10);
+  ValueHandle zero = std_constant_index(0), ten = std_constant_index(10);
 
   SmallVector<bool, 4> isEq = {false, false, false, false};
   SmallVector<AffineExpr, 4> affineExprs = {
@@ -927,9 +902,6 @@ TEST_FUNC(linalg_dilated_conv_nhwc) {
 //       CHECK: linalg.reshape {{.*}} [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] : memref<32x16xf32> into memref<4x8x16xf32>
 // clang-format on
 TEST_FUNC(linalg_metadata_ops) {
-  using namespace edsc;
-  using namespace edsc::intrinsics;
-
   auto f32Type = FloatType::getF32(&globalContext());
   auto memrefType = MemRefType::get({4, 8, 16}, f32Type, {}, 0);
   auto f = makeFunction("linalg_metadata_ops", {}, {memrefType});


        


More information about the Mlir-commits mailing list