[Mlir-commits] [mlir] 0b2197b - [mlir][Interfaces] Clean up `DestinationStyleOpInterface` (#67015)

llvmlistbot at llvm.org
Thu Sep 21 09:04:14 PDT 2023


Author: Matthias Springer
Date: 2023-09-21T18:04:08+02:00
New Revision: 0b2197b0cf9d557cb3a20e0aca3f727aea5b290d

URL: https://github.com/llvm/llvm-project/commit/0b2197b0cf9d557cb3a20e0aca3f727aea5b290d
DIFF: https://github.com/llvm/llvm-project/commit/0b2197b0cf9d557cb3a20e0aca3f727aea5b290d.diff

LOG: [mlir][Interfaces] Clean up `DestinationStyleOpInterface` (#67015)

* "init" operands are specified with `MutableOperandRange` (which gives
access to the underlying `OpOperand *`). No more magic numbers.
* Remove most interface methods and turn them into shared helper functions.
Only `getDpsInitsMutable` must be implemented by each op (see the usage
sketch after this list).
* Provide separate helper functions for accessing mutable/immutable
operands (`OpOperand`/`Value`, in line with #66515): `getDpsInitsMutable`
and `getDpsInits` (same naming convention as auto-generated op accessors).
`getDpsInputOperands` was not renamed because this function cannot return a
`MutableOperandRange` (the input operands are not necessarily consecutive).
`OpOperandVector` is no longer needed.
* The new `getDpsInits`/`getDpsInitsMutable` are more efficient than the
old `getDpsInitOperands` because no `SmallVector` is created; the new
functions return a range of operands.
* Fix a bug in `getDpsInputOperands`: out-of-bounds operands could be
returned.
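
A minimal usage sketch of the new API (illustrative only, not part of this
commit; `op` is any op implementing the interface):

  // The only method a DPS op must implement is getDpsInitsMutable();
  // everything else below is a shared helper derived from it.
  void inspectDpsOp(DestinationStyleOpInterface op) {
    for (Value init : op.getDpsInits())              // init values
      (void)init;
    for (OpOperand &init : op.getDpsInitsMutable())  // init OpOperands
      (void)init.get();
    // Inputs are not necessarily consecutive, so they are returned as a
    // SmallVector<OpOperand *> instead of a MutableOperandRange.
    for (OpOperand *input : op.getDpsInputOperands())
      (void)input->get();
  }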

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
    mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
    mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
    mlir/include/mlir/Interfaces/DestinationStyleOpInterface.h
    mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
    mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
    mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
    mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
    mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
    mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp
    mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
    mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
    mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
    mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
    mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
    mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
    mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
    mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
    mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
    mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
    mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
    mlir/lib/Dialect/Linalg/Utils/Utils.cpp
    mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
    mlir/lib/Interfaces/DestinationStyleOpInterface.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
    mlir/test/lib/Dialect/Test/TestOps.td
    mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
    mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index 6fcba25a0f29752..9761ab12134ad28 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -264,9 +264,7 @@ def Bufferization_MaterializeInDestinationOp
       return ::llvm::cast<RankedTensorType>(getResult().getType());
     }
 
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2};  // `dest` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getDestMutable(); }
   }];
 
   let assemblyFormat = "$source `in` $dest attr-dict `:` type($source)";

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index 839861c2369ca1d..9ca029b489ad144 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -555,12 +555,12 @@ def LinalgStructuredInterface
        are exceptions. For example, in `map` the output operand isn't used in
        the block.
       }],
-      /*retTy=*/"OpOperandVector",
+      /*retTy=*/"::llvm::SmallVector<OpOperand *>",
       /*methodName=*/"getOpOperandsMatchingBBargs",
       /*args=*/(ins),
       /*methodBody=*/"",
       /*defaultImplementation=*/[{
-        OpOperandVector result;
+        ::llvm::SmallVector<OpOperand *> result;
         result.reserve($_op->getNumOperands());
         llvm::transform(
           this->getOperation()->getOpOperands(),

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
index 4d06747a05d6350..da12e7c83b22b89 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
@@ -149,14 +149,7 @@ def Linalg_SoftmaxOp : Linalg_Op<"softmax",
     int64_t getOutputOperandRank() {
       return getOutputOperandType().getRank();
     }
-    // Method to implement DestinationStyleOpInterface.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      std::pair<unsigned, unsigned> outputsIndexAndLength =
-        getODSOperandIndexAndLength(1);
-      return std::make_pair<int64_t, int64_t>(
-          outputsIndexAndLength.first,
-          outputsIndexAndLength.first + outputsIndexAndLength.second);
-    }
+    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
   }];
   let hasVerifier = 1;
 }

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index c8d579949dc4eb6..21a5e5cc47aeb5c 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -207,10 +207,8 @@ def GenericOp : LinalgStructuredBase_Op<"generic", [
     getRegionBuilder() {
       return nullptr;
     }
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - getOutputs().size(), getNumOperands};
-    }
+
+    MutableOperandRange getDpsInitsMutable() { return getOutputsMutable(); }
   }];
 
   let hasCanonicalizer = 1;
@@ -283,11 +281,9 @@ def MapOp : LinalgStructuredBase_Op<"map", [
     }
 
     // Implement functions necessary for DestinationStyleOpInterface.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
-    }
-    OpOperandVector getOpOperandsMatchingBBargs() {
+    MutableOperandRange getDpsInitsMutable() { return getInitMutable(); }
+
+    SmallVector<OpOperand *> getOpOperandsMatchingBBargs() {
       return getDpsInputOperands();
     }
 
@@ -381,9 +377,7 @@ def ReduceOp : LinalgStructuredBase_Op<"reduce", [
     getRegionBuilder() {
       return nullptr;
     }
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {getInits().size(), getNumOperands()};
-    }
+    MutableOperandRange getDpsInitsMutable() { return getInitsMutable(); }
   }];
 
   let hasCustomAssemblyFormat = 1;
@@ -446,10 +440,7 @@ def TransposeOp : LinalgStructuredBase_Op<"transpose", [
     }
 
     // Implement functions necessary for DestinationStyleOpInterface.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
-    }
+    MutableOperandRange getDpsInitsMutable() { return getInitMutable(); }
 
     static std::function<void(mlir::ImplicitLocOpBuilder &, mlir::Block &,
         mlir::ArrayRef<mlir::NamedAttribute>)>
@@ -517,10 +508,7 @@ def BroadcastOp : LinalgStructuredBase_Op<"broadcast", [
     }
 
     // Implement functions necessary for DestinationStyleOpInterface.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
-    }
+    MutableOperandRange getDpsInitsMutable() { return getInitMutable(); }
 
     static std::function<void(mlir::ImplicitLocOpBuilder &, mlir::Block &,
         mlir::ArrayRef<mlir::NamedAttribute>)>
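
For the single-init ops above, the ODS-generated `get<Name>Mutable()`
accessor already is a `MutableOperandRange`, so the interface method is a
one-liner. A sketch of mutating an init through that range (hypothetical
helper, not from this commit):

  // Replace the sole init of a DPS op without hard-coding its position.
  void replaceSingleInit(DestinationStyleOpInterface op, Value newInit) {
    MutableOperandRange inits = op.getDpsInitsMutable();
    assert(inits.size() == 1 && "expected exactly one init");
    inits[0].set(newInit);  // operator[] yields the underlying OpOperand
  }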

diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
index d1c33d8b4c03c3f..86a250b77dcc8ee 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
@@ -750,9 +750,7 @@ def Tensor_InsertOp : Tensor_Op<"insert", [
   }];
 
   let extraClassDeclaration = [{
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2};  // `dest` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getDestMutable(); }
   }];
 
   let hasFolder = 1;
@@ -892,9 +890,7 @@ def Tensor_InsertSliceOp : Tensor_OpWithOffsetSizesAndStrides<"insert_slice", [
     /// and `strides` operands.
     static unsigned getOffsetSizeAndStrideStartOperandIndex() { return 2; }
 
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2};  // `dest` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getDestMutable(); }
   }];
 
   let hasCanonicalizer = 1;
@@ -1714,10 +1710,7 @@ class Tensor_RelayoutOp<string mnemonic, list<Trait> traits = []> :
     RankedTensorType getDestType() {
       return ::llvm::cast<RankedTensorType>(getDest().getType()); };
 
-    /// Return position for init operand. Init operand is `dest`.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2}; // `dest` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getDestMutable(); }
 
     /// Interface method for ConditionallySpeculatable.
     Speculation::Speculatability getSpeculatability();

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index aba85f86a7eef9c..701eefcc1e7da6a 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1330,8 +1330,8 @@ def Vector_TransferReadOp :
     // MaskableOpInterface methods.
     bool supportsPassthru() { return true; }
 
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {0, 0};  // empty range (no init operands)
+    MutableOperandRange getDpsInitsMutable() {
+      return MutableOperandRange(getOperation(), /*start=*/0, /*length=*/0);
     }
   }];
 
@@ -1494,9 +1494,7 @@ def Vector_TransferWriteOp :
     ///  ops of other dialects.
     Value getValue() { return getVector(); }
 
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2};  // `source` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getSourceMutable(); }
   }];
 
   let hasFolder = 1;
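
`vector.transfer_read` shows the degenerate case: a DPS op with no inits
returns an empty `MutableOperandRange`. Caller-visible consequences,
sketched under that assumption (hypothetical code):

  void handleNoInits(DestinationStyleOpInterface op) {
    if (op.getDpsInits().empty()) {
      // Every operand is a DPS input ...
      assert(op.getNumDpsInputs() == op->getNumOperands());
      // ... and no operand is an init or tied to a result.
      for (OpOperand &operand : op->getOpOperands())
        assert(op.isDpsInput(&operand) && !op.isDpsInit(&operand));
    }
  }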

diff --git a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.h b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.h
index 9ace95c6f3d3b2f..6649371f3ed321e 100644
--- a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.h
+++ b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.h
@@ -17,11 +17,6 @@
 #include "llvm/ADT/SmallVector.h"
 
 namespace mlir {
-/// OpOperand vector that implicitly converts to a Value vector.
-struct OpOperandVector : public llvm::SmallVector<OpOperand *> {
-  operator SmallVector<Value>();
-};
-
 namespace detail {
 /// Verify that `op` conforms to the invariants of DestinationStyleOpInterface
 LogicalResult verifyDestinationStyleOpInterface(Operation *op);
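
With `OpOperandVector` (and its implicit conversion to
`SmallVector<Value>`) removed, callers convert explicitly. A migration
sketch (hypothetical names, not from this commit):

  void collectInputValues(DestinationStyleOpInterface op) {
    SmallVector<OpOperand *> inputOperands = op.getDpsInputOperands();
    // Previously: OpOperandVector converted implicitly to SmallVector<Value>.
    // Now: map the OpOperands to their Values, or call getDpsInputs().
    SmallVector<Value> inputValues = llvm::to_vector(llvm::map_range(
        inputOperands, [](OpOperand *operand) { return operand->get(); }));
    (void)inputValues;
  }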

diff --git a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
index 8558e0279e3e5bb..4c52d803e114762 100644
--- a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
+++ b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
@@ -13,16 +13,16 @@ include "mlir/IR/OpBase.td"
 
 def DestinationStyleOpInterface : OpInterface<"DestinationStyleOpInterface"> {
   let description = [{
-    Ops that are in destination style have designated init operands, which act
+    Ops that are in destination style have designated "init" operands, which act
     as initial tensor values for the results of the operation or the init
     buffers to which the results of the op will be written.
 
     Init operands must be ranked tensors or ranked memrefs. Input operands can
     have any type. All non-init operands are DPS inputs.
 
-    It is assumed that the init operands of the op are the operands at
-    position [start, end). The positions are defined by getDpsInitsPositionRange
-    method.
+    The init operands of this op are specified by the MutableOperandRange that
+    the `getDpsInitsMutable` interface method returns. This implies that the
+    init operands must be a consecutive range of operands.
 
     If the op has "tensor semantics", then the input operands are either ranked
     tensors or other non-tensor/memref types ("scalars"). The init operands are
@@ -50,241 +50,157 @@ def DestinationStyleOpInterface : OpInterface<"DestinationStyleOpInterface"> {
     Example of an op that is not in destination style: `%r = tensor.pad %t`.
    This op is not in destination style because `%r` and `%t` have different
    shape.
-
-    Each op that wants to implement DestinationStyleOpInterface needs to define
-    the getDpsInitsPositionRange() method.
   }];
 
   let cppNamespace = "::mlir";
 
   let methods = [
-    // This method has to be defined for every DPS op.
     InterfaceMethod<
       /*desc=*/"Return start and end indices of the init operands range.",
-      /*retTy=*/"std::pair<int64_t, int64_t>",
-      /*methodName=*/"getDpsInitsPositionRange",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/""
-    >,
-    //===------------------------------------------------------------------===//
-    // Operands handling.
-    //===------------------------------------------------------------------===//
-    // The operand list is assumed to start with the input operands and end
-    // with the init operands. Therefore, all methods to access the inputs
-    // and inits can be expressed if the number of init operands is know.
-    InterfaceMethod<
-      /*desc=*/"Return the number of inits.",
-      /*retTy=*/"int64_t",
-      /*methodName=*/"getNumDpsInits",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        return end - start;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the init operands.",
-      /*retTy=*/"::mlir::OpOperandVector",
-      /*methodName=*/"getDpsInitOperands",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-
-        ::mlir::OpOperandVector result;
-        result.reserve(end - start);
-        for (int i = start; i < end; ++i)
-          result.push_back(&$_op->getOpOperand(i));
-        return result;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the `i`-th init operand.",
-      /*retTy=*/"::mlir::OpOperand *",
-      /*methodName=*/"getDpsInitOperand",
-      /*args=*/(ins "int64_t":$i),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(i >= 0 && i < $_op.getNumDpsInits());
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        return &$_op->getOpOperand(start + i);
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Set the `i`-th init operand.",
-      /*retTy=*/"void",
-      /*methodName=*/"setDpsInitOperand",
-      /*args=*/(ins "int64_t":$i, "::mlir::Value":$value),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(i >= 0 && i < $_op.getNumDpsInits());
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        $_op->setOperand(start + i, value);
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the number of inputs.",
-      /*retTy=*/"int64_t",
-      /*methodName=*/"getNumDpsInputs",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        return $_op.getNumOperands() - $_op.getNumDpsInits();
-      }]
+      /*retTy=*/"::mlir::MutableOperandRange",
+      /*methodName=*/"getDpsInitsMutable",
+      /*args=*/(ins)
     >,
-    InterfaceMethod<
-      /*desc=*/"Return the input operands.",
-      /*retTy=*/"::mlir::OpOperandVector",
-      /*methodName=*/"getDpsInputOperands",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        int64_t numInits = end - start;
-        int64_t numOperands = $_op.getNumOperands();
+  ];
 
-        ::mlir::OpOperandVector result;
-        result.reserve(numOperands - numInits);
-        for (int i = 0; i < start; ++i)
+  let extraSharedClassDeclaration = [{
+    ::mlir::OperandRange getDpsInits() {
+      return $_op.getDpsInitsMutable();
+    }
+
+    /// Return the number of DPS inits.
+    int64_t getNumDpsInits() { return $_op.getDpsInits().size(); }
+
+    /// Return the `i`-th DPS init.
+    ::mlir::OpOperand *getDpsInitOperand(int64_t i) {
+      return &$_op.getDpsInitsMutable()[i];
+    }
+
+    /// Set the `i`-th DPS init.
+    void setDpsInitOperand(int64_t i, Value value) {
+      assert(i >= 0 && i < $_op.getNumDpsInits() && "invalid index");
+      $_op->setOperand($_op.getDpsInits().getBeginOperandIndex() + i, value);
+    }
+
+    /// Return the number of DPS inputs.
+    int64_t getNumDpsInputs() {
+      return $_op->getNumOperands() - $_op.getNumDpsInits();
+    }
+
+    /// Return the DPS input operands.
+    ::llvm::SmallVector<::mlir::OpOperand *> getDpsInputOperands() {
+      ::llvm::SmallVector<::mlir::OpOperand *> result;
+      int64_t numOperands = $_op->getNumOperands();
+      ::mlir::OperandRange range = $_op.getDpsInits();
+      if (range.empty()) {
+        result.reserve(numOperands);
+        for (int64_t i = 0; i < numOperands; ++i)
           result.push_back(&$_op->getOpOperand(i));
-        for (int i = end; i < numOperands; ++i)
-          result.push_back(&$_op->getOpOperand(end + i));
-
         return result;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/[{ Return the `i`-th input operand.  }],
-      /*retTy=*/"::mlir::OpOperand *",
-      /*methodName=*/"getDpsInputOperand",
-      /*args=*/(ins "int64_t":$i),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(i >= 0 && i < getNumDpsInputs());
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        return &$_op->getOpOperand(i < start ? i : i + end - start) ;
-      }]
-    >,
-    //===------------------------------------------------------------------===//
-    // Input and DpsInit arguments handling.
-    //===------------------------------------------------------------------===//
-    InterfaceMethod<
-      /*desc=*/"Return true if `opOperand` is an input.",
-      /*retTy=*/"bool",
-      /*methodName=*/"isDpsInput",
-      /*args=*/(ins "::mlir::OpOperand *":$opOperand),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        auto operandNumber = opOperand->getOperandNumber();
-        return operandNumber < start || operandNumber >= end;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return true if `opOperand` is an init.",
-      /*retTy=*/"bool",
-      /*methodName=*/"isDpsInit",
-      /*args=*/(ins "::mlir::OpOperand *":$opOperand),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        auto operandNumber = opOperand->getOperandNumber();
-        return operandNumber >= start && operandNumber < end;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/[{
-        Return true if the `opOperand` is a scalar value. A scalar is defined
-        as neither a memref nor a tensor value.
-      }],
-      /*retTy=*/"bool",
-      /*methodName=*/"isScalar",
-      /*args=*/(ins "::mlir::OpOperand *":$opOperand),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(opOperand->getOwner() == $_op.getOperation());
-        return !::llvm::isa<MemRefType, TensorType>(opOperand->get().getType());
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the OpResult that is tied to the given OpOperand.",
-      /*retTy=*/"::mlir::OpResult",
-      /*methodName=*/"getTiedOpResult",
-      /*args=*/(ins "::mlir::OpOperand *":$opOperand),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(opOperand->getOwner() == $_op.getOperation());
-
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        int64_t resultIndex = opOperand->getOperandNumber() - start;
+      }
+      int64_t firstInitPos = range.getBeginOperandIndex();
+      int64_t numInits = range.size();
+      result.reserve(numOperands - numInits);
+      for (int64_t i = 0; i < firstInitPos; ++i)
+        result.push_back(&$_op->getOpOperand(i));
+      for (int64_t i = firstInitPos + numInits; i < numOperands; ++i)
+        result.push_back(&$_op->getOpOperand(i));
+      return result;
+    }
+
+    /// Return the DPS input values.
+    ::llvm::SmallVector<::mlir::Value> getDpsInputs() {
+      return ::llvm::to_vector(::llvm::map_range(
+          $_op.getDpsInputOperands(), [](OpOperand *o) { return o->get(); }));
+    }
+
+    /// Return the `i`-th DPS input operand.
+    ::mlir::OpOperand *getDpsInputOperand(int64_t i) {
+      ::mlir::OperandRange range = $_op.getDpsInits();
+      if (range.empty())
+        return &$_op->getOpOperand(i);
+      int64_t firstInitPos = range.getBeginOperandIndex();
+      int64_t numInits = range.size();
+      assert(i >= 0 && i < $_op->getNumOperands() - numInits
+             && "invalid index");
+      return &$_op->getOpOperand(
+          i < firstInitPos ? i : i + firstInitPos + numInits);
+    }
+
+    /// Return "true" if `opOperand` is an "input".
+    bool isDpsInput(::mlir::OpOperand *opOperand) {
+      assert(opOperand->getOwner() == $_op && "invalid operand");
+      return !$_op.isDpsInit(opOperand);
+    }
+
+    /// Return "true" if `opOperand` is an "init".
+    bool isDpsInit(::mlir::OpOperand *opOperand) {
+      assert(opOperand->getOwner() == $_op && "invalid operand");
+      ::mlir::OperandRange range = $_op.getDpsInits();
+      if (range.empty())
+        return false;
+      auto operandNumber = opOperand->getOperandNumber();
+      return operandNumber >= range.getBeginOperandIndex()
+          && operandNumber < range.getBeginOperandIndex() + range.size();
+    }
+
+    /// Return "true" if `opOperand` is a scalar value. A scalar is defined as
+    /// neither a MemRef nor a tensor value.
+    bool isScalar(::mlir::OpOperand *opOperand) {
+      assert(opOperand->getOwner() == $_op && "invalid operand");
+      return !::llvm::isa<MemRefType, TensorType>(opOperand->get().getType());
+    }
+
+    /// Return the OpResult that is tied to the given OpOperand.
+    ::mlir::OpResult getTiedOpResult(::mlir::OpOperand *opOperand) {
+        assert(opOperand->getOwner() == $_op && "invalid operand");
+        ::mlir::OperandRange range = $_op.getDpsInits();
+        assert(!range.empty() && "op has no inits");
+        int64_t resultIndex =
+            opOperand->getOperandNumber() - range.getBeginOperandIndex();
         assert(resultIndex >= 0 &&
-               resultIndex < $_op->getNumResults() );
+               resultIndex < $_op->getNumResults());
         return $_op->getResult(resultIndex);
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the OpOperand that is tied to the given OpResult.",
-      /*retTy=*/"::mlir::OpOperand *",
-      /*methodName=*/"getTiedOpOperand",
-      /*args=*/(ins "::mlir::OpResult":$opResult),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(opResult.getDefiningOp() == $_op.getOperation());
-        return $_op.getDpsInitOperand(opResult.getResultNumber());
-      }]
-    >,
-    //===------------------------------------------------------------------===//
-    // Other interface methods.
-    //===------------------------------------------------------------------===//
-    InterfaceMethod<
-      /*desc=*/[{
-        Return whether the op has buffer semantics. That is the case if the op
-        has no tensor operands and at least one memref operand.
-      }],
-      /*retTy=*/"bool",
-      /*methodName=*/"hasBufferSemantics",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        // No tensors.
-        auto isTensor = [](Value v){
-          return ::llvm::isa<::mlir::RankedTensorType>(v.getType());
-        };
-        if (::llvm::any_of($_op->getOperands(), isTensor))
-          return false;
-        // At least one memref.
-        auto isMemref = [](Value v){
-          return ::llvm::isa<::mlir::MemRefType>(v.getType());
-        };
-        return llvm::any_of($_op->getOperands(), isMemref);
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/[{
-        Return whether the op has tensor semantics. That is the case if the op
-        has no memref operands and at least one tensor operand.
-      }],
-      /*retTy=*/"bool",
-      /*methodName=*/"hasTensorSemantics",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        // No memrefs.
-        auto isMemref = [](Value v){
-          return ::llvm::isa<::mlir::MemRefType>(v.getType());
-        };
-        if (::llvm::any_of($_op->getOperands(), isMemref))
-          return false;
-        // At least one tensor.
-        auto isTensor = [](Value v){
-          return ::llvm::isa<::mlir::RankedTensorType>(v.getType());
-        };
-        return llvm::any_of($_op->getOperands(), isTensor);
-      }]
-    >
-  ];
+    }
+
+    /// Return the OpOperand that is tied to the given OpResult.
+    ::mlir::OpOperand *getTiedOpOperand(::mlir::OpResult opResult) {
+      assert(opResult.getDefiningOp() == $_op && "invalid opresult");
+      return $_op.getDpsInitOperand(opResult.getResultNumber());
+    }
+
+    /// Return whether the op has buffer semantics. That is the case if the op
+    /// has no ranked tensor operands and at least one memref operand.
+    bool hasBufferSemantics() {
+      // No tensors.
+      auto isTensor = [](Value v){
+        return ::llvm::isa<::mlir::RankedTensorType>(v.getType());
+      };
+      if (::llvm::any_of($_op->getOperands(), isTensor))
+        return false;
+      // At least one memref.
+      auto isMemref = [](Value v){
+        return ::llvm::isa<::mlir::MemRefType>(v.getType());
+      };
+      return llvm::any_of($_op->getOperands(), isMemref);
+    }
+
+    /// Return whether the op has tensor semantics. That is the case if the op
+    /// has no memref operands and at least one ranked tensor operand.
+    bool hasTensorSemantics() {
+      // No memrefs.
+      auto isMemref = [](Value v){
+        return ::llvm::isa<::mlir::MemRefType>(v.getType());
+      };
+      if (::llvm::any_of($_op->getOperands(), isMemref))
+        return false;
+      // At least one tensor.
+      auto isTensor = [](Value v){
+        return ::llvm::isa<::mlir::RankedTensorType>(v.getType());
+      };
+      return llvm::any_of($_op->getOperands(), isTensor);
+    }
+  }];
 
   let verify = [{ return detail::verifyDestinationStyleOpInterface($_op); }];
   let verifyWithRegions = 1;
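
The helpers above preserve the DPS invariant that the i-th init is tied to
the i-th result. A round-trip sketch (illustrative only; assumes the op has
tensor results):

  void checkTiedness(DestinationStyleOpInterface op) {
    for (OpResult result : op->getResults()) {
      OpOperand *init = op.getTiedOpOperand(result);
      assert(op.isDpsInit(init));
      assert(op.getTiedOpResult(init) == result);
    }
  }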

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
index 95a766b33757130..ea50e1232a4c74a 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -904,8 +904,8 @@ getResultsPositionInLoopsToShapeMap(LinalgOp &op) {
   int64_t outputRankSum = 0;
   for (OpOperand *input : op.getDpsInputOperands())
     inputRankSum += op.getRank(input);
-  for (OpOperand *output : op.getDpsInitOperands())
-    outputRankSum += op.getRank(output);
+  for (OpOperand &output : op.getDpsInitsMutable())
+    outputRankSum += op.getRank(&output);
   return {inputRankSum, inputRankSum + outputRankSum};
 }
 
@@ -948,19 +948,18 @@ LinalgOp::reifyResultShapes(OpBuilder &b,
           createFlatListOfOperandDims(b, loc));
   int64_t pos = 0;
   ArrayRef<AffineExpr> shapeExprs = resultShapesFromInputShapesMap.getResults();
-  for (OpOperand *opOperand : getDpsInitOperands()) {
+  for (OpOperand &opOperand : getDpsInitsMutable()) {
     SmallVector<OpFoldResult> shapes;
-    for (int64_t dim : llvm::seq<int64_t>(0, getRank(opOperand))) {
-      auto shapedType = llvm::cast<ShapedType>(opOperand->get().getType());
+    for (int64_t dim : llvm::seq<int64_t>(0, getRank(&opOperand))) {
+      auto shapedType = llvm::cast<ShapedType>(opOperand.get().getType());
       if (!shapedType.isDynamicDim(dim)) {
         // Static dim: Return IntegerAttr.
         shapes.push_back(b.getIndexAttr(shapedType.getDimSize(dim)));
       } else {
         // Dynamic dim: Return Value.
-        OpFoldResult ofr =
-            checkDimExpr.visit(shapeExprs[pos])
-                ? createOrFoldDimOp(b, loc, opOperand->get(), dim)
-                : allResultDimValues[pos];
+        OpFoldResult ofr = checkDimExpr.visit(shapeExprs[pos])
+                               ? createOrFoldDimOp(b, loc, opOperand.get(), dim)
+                               : allResultDimValues[pos];
         shapes.push_back(getValueOrCreateConstantIndexOp(b, loc, ofr));
       }
       pos++;
@@ -977,7 +976,7 @@ int64_t LinalgOp::getIndexingMapIndex(OpOperand *opOperand) {
   auto dpsIface = cast<DestinationStyleOpInterface>(*this->getOperation());
   if (!dpsIface.isDpsInput(opOperand))
     return operandNumber;
-  auto [start, end] = dpsIface.getDpsInitsPositionRange();
+  unsigned start = dpsIface.getDpsInits().getBeginOperandIndex();
   assert(!dpsIface.isDpsInit(opOperand));
   // Account for potential inputs that are not DPS and may not appear in
   // `indexingMaps`.
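
The pattern in this hunk recurs throughout the commit: pointer-based loops
over the old `getDpsInitOperands()` become reference-based loops over
`getDpsInitsMutable()`. Both directions, sketched with hypothetical `use*`
callbacks:

  // OpOperand-based iteration (take the address where a pointer is needed):
  for (OpOperand &initOperand : op.getDpsInitsMutable())
    useOperand(&initOperand);
  // Value-based iteration, when only the values matter:
  for (Value initValue : op.getDpsInits())
    useValue(initValue);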

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index f87aa4559e10afe..5871c59e1d35d95 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -948,8 +948,7 @@ void GenericOp::print(OpAsmPrinter &p) {
   }
 
   // Printing is shared with named ops, except for the region and attributes
-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());
 
   genericAttrNames.push_back("operandSegmentSizes");
   genericAttrNamesSet.insert(genericAttrNames.back());
@@ -1044,20 +1043,20 @@ ParseResult GenericOp::parse(OpAsmParser &parser, OperationState &result) {
 static void getGenericEffectsImpl(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
         &effects,
-    ValueRange results, const OpOperandVector &inputOperands,
-    const OpOperandVector &outputOperands) {
-  for (auto *operand : inputOperands) {
-    if (!llvm::isa<MemRefType>(operand->get().getType()))
+    ValueRange results, const ValueRange inputOperands,
+    ValueRange outputOperands) {
+  for (auto operand : inputOperands) {
+    if (!llvm::isa<MemRefType>(operand.getType()))
       continue;
-    effects.emplace_back(MemoryEffects::Read::get(), operand->get(),
+    effects.emplace_back(MemoryEffects::Read::get(), operand,
                          SideEffects::DefaultResource::get());
   }
-  for (auto *operand : outputOperands) {
-    if (!llvm::isa<MemRefType>(operand->get().getType()))
+  for (auto operand : outputOperands) {
+    if (!llvm::isa<MemRefType>(operand.getType()))
       continue;
-    effects.emplace_back(MemoryEffects::Read::get(), operand->get(),
+    effects.emplace_back(MemoryEffects::Read::get(), operand,
                          SideEffects::DefaultResource::get());
-    effects.emplace_back(MemoryEffects::Write::get(), operand->get(),
+    effects.emplace_back(MemoryEffects::Write::get(), operand,
                          SideEffects::DefaultResource::get());
   }
 }
@@ -1065,8 +1064,8 @@ static void getGenericEffectsImpl(
 void GenericOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
         &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }
 
 LogicalResult GenericOp::verify() { return success(); }
@@ -1345,8 +1344,7 @@ void MapOp::print(OpAsmPrinter &p) {
     printShortForm(p, payloadOp);
   }
 
-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());
   p.printOptionalAttrDict((*this)->getAttrs());
 
   if (!payloadOp) {
@@ -1414,8 +1412,8 @@ ArrayAttr MapOp::getIndexingMaps() {
 void MapOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
         &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }
 
 //===----------------------------------------------------------------------===//
@@ -1483,8 +1481,8 @@ ArrayAttr ReduceOp::getIndexingMaps() {
 void ReduceOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
         &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }
 
 static ParseResult parseDenseI64ArrayAttr(OpAsmParser &parser,
@@ -1547,8 +1545,7 @@ void ReduceOp::print(OpAsmPrinter &p) {
     printShortForm(p, payloadOp);
   }
 
-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());
   printDenseI64ArrayAttr(p, getDimensionsAttrName(), getDimensions());
   p.printOptionalAttrDict((*this)->getAttrs(), {getDimensionsAttrName()});
   if (!payloadOp) {
@@ -1638,11 +1635,10 @@ LogicalResult ReduceOp::verify() {
   }
 
   // Check that the last block arguments match the element type of the outputs.
-  for (auto [output, bbArg] :
-       llvm::zip(getDpsInitOperands(),
-                 block->getArguments().take_back(getNumDpsInits()))) {
+  for (auto [output, bbArg] : llvm::zip(
+           getDpsInits(), block->getArguments().take_back(getNumDpsInits()))) {
     auto outputElementType =
-        llvm::cast<ShapedType>(output->get().getType()).getElementType();
+        llvm::cast<ShapedType>(output.getType()).getElementType();
     if (outputElementType != bbArg.getType())
       return emitOpError()
              << "output element type " << outputElementType
@@ -1712,8 +1708,7 @@ void TransposeOp::getAsmResultNames(
 }
 
 void TransposeOp::print(OpAsmPrinter &p) {
-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());
   printDenseI64ArrayAttr(p, getPermutationAttrName(), getPermutation());
   p.printOptionalAttrDict((*this)->getAttrs(), {getPermutationAttrName()});
 }
@@ -1771,8 +1766,8 @@ ArrayAttr TransposeOp::getIndexingMaps() {
 void TransposeOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
         &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }
 
 //===----------------------------------------------------------------------===//
@@ -1826,8 +1821,7 @@ void BroadcastOp::getAsmResultNames(
 }
 
 void BroadcastOp::print(OpAsmPrinter &p) {
-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());
   printDenseI64ArrayAttr(p, getDimensionsAttrName(), getDimensions());
   p.printOptionalAttrDict((*this)->getAttrs(), {getDimensionsAttrName()});
 }
@@ -1894,8 +1888,8 @@ ArrayAttr BroadcastOp::getIndexingMaps() {
 void BroadcastOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
         &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }
 
 //===----------------------------------------------------------------------===//
@@ -2126,8 +2120,9 @@ struct FoldTensorCastConsumerOp : public OpRewritePattern<tensor::CastOp> {
     OpOperand *outOperand = linalgOp.getDpsInitOperand(resultNumber);
     Value newOperand =
         rewriter.create<tensor::CastOp>(loc, resultType, outOperand->get());
-    SmallVector<Value> newOperands{linalgOp.getDpsInputOperands()};
-    SmallVector<Value> outputOperands{linalgOp.getDpsInitOperands()};
+    SmallVector<Value> newOperands = linalgOp.getDpsInputs();
+    SmallVector<Value> outputOperands(linalgOp.getDpsInits().begin(),
+                                      linalgOp.getDpsInits().end());
     outputOperands[resultNumber] = newOperand;
     newOperands.append(outputOperands.begin(), outputOperands.end());
 
@@ -2399,8 +2394,8 @@ SoftmaxOp::reifyResultShapes(OpBuilder &b,
 void SoftmaxOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
         &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }
 
 // Helper functions for softmax decomposition.

diff --git a/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp b/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
index 684d823d9f3df8e..28377279b7ce94c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
@@ -119,9 +119,9 @@ struct BubbleUpExtractSliceOpPattern
                         /*omitPartialTileCheck=*/true);
 
     SmallVector<Type, 4> resultTensorTypes;
-    for (OpOperand *opOperand : linalgOp.getDpsInitOperands())
+    for (OpOperand &opOperand : linalgOp.getDpsInitsMutable())
       resultTensorTypes.push_back(
-          tiledOperands[opOperand->getOperandNumber()].getType());
+          tiledOperands[opOperand.getOperandNumber()].getType());
 
     Operation *newOp =
         clone(rewriter, linalgOp, resultTensorTypes, tiledOperands);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
index 6b06c32d22eba7d..4322b6e77eb8fcf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
@@ -97,8 +97,8 @@ class FoldConstantBase : public OpRewritePattern<GenericOp> {
                       [](AffineMap map) { return map.isPermutation(); }))
       return failure();
 
-    for (OpOperand *operand : genericOp.getDpsInitOperands()) {
-      if (genericOp.payloadUsesValueFromOperand(operand))
+    for (OpOperand &operand : genericOp.getDpsInitsMutable()) {
+      if (genericOp.payloadUsesValueFromOperand(&operand))
         return failure();
     }
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
index 42f87a16c92f308..eae03924fb5c7bd 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
@@ -235,8 +235,8 @@ DecomposeLinalgOp::createResidualGenericOp(GenericOp genericOp,
     indexingMaps.push_back(
         peeledGenericOp.getIndexingMapMatchingResult(result));
   }
-  for (OpOperand *outOperand : genericOp.getDpsInitOperands())
-    indexingMaps.push_back(genericOp.getMatchingIndexingMap(outOperand));
+  for (OpOperand &outOperand : genericOp.getDpsInitsMutable())
+    indexingMaps.push_back(genericOp.getMatchingIndexingMap(&outOperand));
 
   auto indexingMapAttr = rewriter.getAffineMapArrayAttr(indexingMaps);
   return rewriter.create<GenericOp>(
@@ -263,8 +263,8 @@ DecomposeLinalgOp::matchAndRewrite(GenericOp genericOp,
         genericOp, "only operations with tensor semantics are handled");
   }
 
-  if (llvm::any_of(genericOp.getDpsInitOperands(), [&](OpOperand *outOperand) {
-        return !genericOp.getMatchingIndexingMap(outOperand).isPermutation();
+  if (llvm::any_of(genericOp.getDpsInitsMutable(), [&](OpOperand &outOperand) {
+        return !genericOp.getMatchingIndexingMap(&outOperand).isPermutation();
       })) {
     return rewriter.notifyMatchFailure(
         genericOp, "unhandled decomposition of generic op with out operand not "

diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index fa901dfd1f87cdb..2e3610b7c08d9da 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -88,12 +88,12 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
     if (genericOp.getNumParallelLoops() != genericOp.getNumLoops())
       return failure();
 
-    auto outputOperands = genericOp.getDpsInitOperands();
+    auto outputOperands = genericOp.getDpsInitsMutable();
     SetVector<OpOperand *> candidates;
-    for (OpOperand *op : outputOperands) {
-      if (genericOp.getMatchingBlockArgument(op).use_empty())
+    for (OpOperand &op : outputOperands) {
+      if (genericOp.getMatchingBlockArgument(&op).use_empty())
         continue;
-      candidates.insert(op);
+      candidates.insert(&op);
     }
 
     if (candidates.empty())
@@ -101,7 +101,7 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
 
     // Compute the modified indexing maps.
     int64_t origNumInput = genericOp.getNumDpsInputs();
-    SmallVector<Value> newInputOperands = genericOp.getDpsInputOperands();
+    SmallVector<Value> newInputOperands = genericOp.getDpsInputs();
     SmallVector<AffineMap> indexingMaps = genericOp.getIndexingMapsArray();
     SmallVector<AffineMap> newIndexingMaps;
     newIndexingMaps.append(indexingMaps.begin(),
@@ -114,7 +114,8 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
                            indexingMaps.end());
 
     Location loc = genericOp.getLoc();
-    SmallVector<Value> newOutputOperands = outputOperands;
+    SmallVector<Value> newOutputOperands =
+        llvm::to_vector(genericOp.getDpsInits());
     for (OpOperand *op : candidates) {
       OpBuilder::InsertionGuard guard(rewriter);
       rewriter.setInsertionPointAfterValue(op->get());
@@ -122,7 +123,7 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
       auto empty = rewriter.create<tensor::EmptyOp>(
           loc, tensor::getMixedSizes(rewriter, loc, op->get()), elemType);
 
-      auto [start, end] = genericOp.getDpsInitsPositionRange();
+      unsigned start = genericOp.getDpsInits().getBeginOperandIndex();
       newOutputOperands[op->getOperandNumber() - start] = empty.getResult();
     }
 
@@ -145,9 +146,9 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
       mapper.map(bbarg, block->addArgument(bbarg.getType(), loc));
     }
 
-    for (OpOperand *op : outputOperands) {
-      BlockArgument bbarg = genericOp.getMatchingBlockArgument(op);
-      if (candidates.count(op))
+    for (OpOperand &op : outputOperands) {
+      BlockArgument bbarg = genericOp.getMatchingBlockArgument(&op);
+      if (candidates.count(&op))
         block->addArgument(bbarg.getType(), loc);
       else
         mapper.map(bbarg, block->addArgument(bbarg.getType(), loc));

diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 6a01c24f026990f..17346607fa9cd7f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -340,23 +340,23 @@ mlir::linalg::fuseElementwiseOps(RewriterBase &rewriter,
   }
 
   // 6. Collect all of the producer outputs.
-  for (const auto &opOperand : llvm::enumerate(producer.getDpsInitOperands())) {
+  for (const auto &opOperand : llvm::enumerate(producer.getDpsInitsMutable())) {
     if (!preservedProducerResults.count(opOperand.index()))
       continue;
 
-    fusedOutputOperands.push_back(opOperand.value()->get());
+    fusedOutputOperands.push_back(opOperand.value().get());
     AffineMap map = getIndexingMapOfProducerOperandsInCoordinatesOfFusedOp(
-        opOperand.value(), producerResultIndexMap,
+        &opOperand.value(), producerResultIndexMap,
         consumer.getMatchingIndexingMap(fusedOperand));
     fusedIndexMaps.push_back(map);
-    fusedResultTypes.push_back(opOperand.value()->get().getType());
+    fusedResultTypes.push_back(opOperand.value().get().getType());
   }
 
   // 7. All of consumer's output operands (skip operands: added by the builder).
-  for (OpOperand *opOperand : consumer.getDpsInitOperands()) {
-    fusedOutputOperands.push_back(opOperand->get());
-    fusedIndexMaps.push_back(consumer.getMatchingIndexingMap(opOperand));
-    Type resultType = opOperand->get().getType();
+  for (OpOperand &opOperand : consumer.getDpsInitsMutable()) {
+    fusedOutputOperands.push_back(opOperand.get());
+    fusedIndexMaps.push_back(consumer.getMatchingIndexingMap(&opOperand));
+    Type resultType = opOperand.get().getType();
     if (!isa<MemRefType>(resultType))
       fusedResultTypes.push_back(resultType);
   }
@@ -812,12 +812,12 @@ fuseWithReshapeByExpansion(GenericOp genericOp, Operation *reshapeOp,
 
   Location loc = genericOp.getLoc();
   SmallVector<Value> outputs;
-  for (OpOperand *opOperand : genericOp.getDpsInitOperands()) {
-    AffineMap indexingMap = genericOp.getMatchingIndexingMap(opOperand);
-    auto opOperandType = cast<RankedTensorType>(opOperand->get().getType());
+  for (OpOperand &opOperand : genericOp.getDpsInitsMutable()) {
+    AffineMap indexingMap = genericOp.getMatchingIndexingMap(&opOperand);
+    auto opOperandType = cast<RankedTensorType>(opOperand.get().getType());
     RankedTensorType expandedOutputType =
         getExpandedType(opOperandType, indexingMap, expansionInfo);
-    if (expandedOutputType != opOperand->get().getType()) {
+    if (expandedOutputType != opOperand.get().getType()) {
       SmallVector<ReassociationIndices> reassociation =
           getReassociationForExpansion(indexingMap, expansionInfo);
       if (failed(reshapeLikeShapesAreCompatible(
@@ -829,10 +829,10 @@ fuseWithReshapeByExpansion(GenericOp genericOp, Operation *reshapeOp,
               /*isExpandingReshape=*/true)))
         return std::nullopt;
       outputs.push_back(rewriter.create<tensor::ExpandShapeOp>(
-          genericOp.getLoc(), expandedOutputType, opOperand->get(),
+          genericOp.getLoc(), expandedOutputType, opOperand.get(),
           reassociation));
     } else {
-      outputs.push_back(opOperand->get());
+      outputs.push_back(opOperand.get());
     }
   }
 
@@ -1495,9 +1495,9 @@ FailureOr<SmallVector<Value>> mlir::linalg::collapseGenericOpIterationDims(
   SmallVector<Value> outputOperands;
   resultTypes.reserve(genericOp.getNumDpsInits());
   outputOperands.reserve(genericOp.getNumDpsInits());
-  for (OpOperand *output : genericOp.getDpsInitOperands()) {
-    Value newOutput =
-        getCollapsedOpOperand(loc, genericOp, output, collapsingInfo, rewriter);
+  for (OpOperand &output : genericOp.getDpsInitsMutable()) {
+    Value newOutput = getCollapsedOpOperand(loc, genericOp, &output,
+                                            collapsingInfo, rewriter);
     outputOperands.push_back(newOutput);
     resultTypes.push_back(newOutput.getType());
   }
@@ -1703,9 +1703,9 @@ class FoldScalarOrSplatConstant : public OpRewritePattern<GenericOp> {
         fusedOperands.push_back(inputValue);
         fusedLocs.push_back(inputValue.getLoc());
       }
-      for (OpOperand *outputOperand : genericOp.getDpsInitOperands())
+      for (OpOperand &outputOperand : genericOp.getDpsInitsMutable())
         fusedIndexMaps.push_back(
-            genericOp.getMatchingIndexingMap(outputOperand));
+            genericOp.getMatchingIndexingMap(&outputOperand));
 
       // Check if the operation shapes to loops map is computable.
       if (!inversePermutation(concatAffineMaps(fusedIndexMaps))) {
@@ -1763,9 +1763,9 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
     rewriter.startRootUpdate(op);
     bool modifiedOutput = false;
     Location loc = op.getLoc();
-    for (OpOperand *opOperand : op.getDpsInitOperands()) {
-      if (!op.payloadUsesValueFromOperand(opOperand)) {
-        Value operandVal = opOperand->get();
+    for (OpOperand &opOperand : op.getDpsInitsMutable()) {
+      if (!op.payloadUsesValueFromOperand(&opOperand)) {
+        Value operandVal = opOperand.get();
         auto operandType = dyn_cast<RankedTensorType>(operandVal.getType());
         if (!operandType)
           continue;
@@ -1783,7 +1783,7 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
             tensor::getMixedSizes(rewriter, loc, operandVal);
         Value emptyTensor = rewriter.create<tensor::EmptyOp>(
             loc, mixedSizes, operandType.getElementType());
-        op->setOperand(opOperand->getOperandNumber(), emptyTensor);
+        op->setOperand(opOperand.getOperandNumber(), emptyTensor);
       }
     }
     if (!modifiedOutput) {

diff --git a/mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp
index 4b754065e318971..5a8320bdb287533 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp
@@ -22,17 +22,17 @@ using namespace mlir::linalg;
 /// Get an output operand that matches the given input operand and can be used
 /// to eliminate a tensor.empty op.
 static OpOperand *getUnusedOutOperand(LinalgOp op, OpOperand *in) {
-  for (OpOperand *operand : op.getDpsInitOperands()) {
+  for (OpOperand &operand : op.getDpsInitsMutable()) {
     // Operand must be unused.
-    if (op.payloadUsesValueFromOperand(operand))
+    if (op.payloadUsesValueFromOperand(&operand))
       continue;
     // Types must match.
-    if (operand->get().getType() != in->get().getType())
+    if (operand.get().getType() != in->get().getType())
       continue;
     // Indexing maps must match.
-    if (op.getMatchingIndexingMap(operand) != op.getMatchingIndexingMap(in))
+    if (op.getMatchingIndexingMap(&operand) != op.getMatchingIndexingMap(in))
       continue;
-    return operand;
+    return &operand;
   }
   return nullptr;
 }

diff --git a/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp b/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
index c89fc5b9da8d383..4e54e48c914aeb8 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
@@ -184,10 +184,11 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
     // If the op doesn't have tensor semantics or outputs should not be removed,
     // keep all the outputs as preserved.
     if (!genericOp.hasTensorSemantics() || !removeOutputs) {
-      for (const auto &en : llvm::enumerate(genericOp.getDpsInitOperands())) {
+      for (const auto &en : llvm::enumerate(genericOp.getDpsInitsMutable())) {
         origToNewPos[en.index()] = newOutputOperands.size();
-        newOutputOperands.push_back(en.value()->get());
-        newIndexingMaps.push_back(genericOp.getMatchingIndexingMap(en.value()));
+        newOutputOperands.push_back(en.value().get());
+        newIndexingMaps.push_back(
+            genericOp.getMatchingIndexingMap(&en.value()));
       }
       return origToNewPos;
     }
@@ -198,25 +199,25 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
     //   computation.
     auto yieldOp = cast<YieldOp>(genericOp.getBody()->getTerminator());
     for (const auto &outputOpOperand :
-         llvm::enumerate(genericOp.getDpsInitOperands())) {
-      OpResult result = genericOp.getTiedOpResult(outputOpOperand.value());
+         llvm::enumerate(genericOp.getDpsInitsMutable())) {
+      OpResult result = genericOp.getTiedOpResult(&outputOpOperand.value());
       AffineMap indexingMap =
-          genericOp.getMatchingIndexingMap(outputOpOperand.value());
-      auto key = std::make_tuple(outputOpOperand.value()->get(), indexingMap,
+          genericOp.getMatchingIndexingMap(&outputOpOperand.value());
+      auto key = std::make_tuple(outputOpOperand.value().get(), indexingMap,
                                  yieldOp->getOperand(outputOpOperand.index()));
       if (isResultValueDead(genericOp, result)) {
         // Check if the opoperand can be dropped without affecting loop
         // bound computation. Add the operand to the list of dropped op
         // operand for checking. If it cannot be dropped, need to pop the
         // value back.
-        droppedOpOperands.push_back(outputOpOperand.value());
+        droppedOpOperands.push_back(&outputOpOperand.value());
         if (genericOp.canOpOperandsBeDropped(droppedOpOperands)) {
           continue;
         }
         droppedOpOperands.pop_back();
       }
 
-      if (!genericOp.payloadUsesValueFromOperand(outputOpOperand.value())) {
+      if (!genericOp.payloadUsesValueFromOperand(&outputOpOperand.value())) {
         // The out operand can also be dropped if it is computed redundantly
         // by another result, the conditions for that are
         // - The same operand is used as the out operand
@@ -225,16 +226,16 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
         auto it = dedupedOutpts.find(key);
         if (it != dedupedOutpts.end()) {
           origToNewPos[outputOpOperand.index()] = it->second;
-          droppedOpOperands.push_back(outputOpOperand.value());
+          droppedOpOperands.push_back(&outputOpOperand.value());
           continue;
         }
       }
 
       origToNewPos[outputOpOperand.index()] = newOutputOperands.size();
       dedupedOutpts[key] = newOutputOperands.size();
-      newOutputOperands.push_back(outputOpOperand.value()->get());
+      newOutputOperands.push_back(outputOpOperand.value().get());
       newIndexingMaps.push_back(
-          genericOp.getMatchingIndexingMap(outputOpOperand.value()));
+          genericOp.getMatchingIndexingMap(&outputOpOperand.value()));
     }
     return origToNewPos;
   }
@@ -254,7 +255,8 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
     // Replace all arguments in the original op, with arguments from the
     // canonicalized op.
     auto updateReplacements =
-        [&](OpOperandVector &origOperands, OpOperandVector &newOperands,
+        [&](SmallVector<OpOperand *> &origOperands,
+            SmallVector<OpOperand *> &newOperands,
             const llvm::SmallDenseMap<unsigned, unsigned> &map) {
           for (const auto &origOperand : llvm::enumerate(origOperands)) {
             auto it = map.find(origOperand.index());
@@ -266,12 +268,17 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
           }
         };
 
-    OpOperandVector origInputOperands = genericOp.getDpsInputOperands();
-    OpOperandVector newInputOperands = newOp.getDpsInputOperands();
+    SmallVector<OpOperand *> origInputOperands =
+        genericOp.getDpsInputOperands();
+    SmallVector<OpOperand *> newInputOperands = newOp.getDpsInputOperands();
     updateReplacements(origInputOperands, newInputOperands, origInsToNewInsPos);
 
-    OpOperandVector origOutputOperands = genericOp.getDpsInitOperands();
-    OpOperandVector newOutputOperands = newOp.getDpsInitOperands();
+    SmallVector<OpOperand *> origOutputOperands =
+        llvm::to_vector(llvm::map_range(genericOp.getDpsInitsMutable(),
+                                        [](OpOperand &o) { return &o; }));
+    SmallVector<OpOperand *> newOutputOperands =
+        llvm::to_vector(llvm::map_range(newOp.getDpsInitsMutable(),
+                                        [](OpOperand &o) { return &o; }));
     updateReplacements(origOutputOperands, newOutputOperands,
                        origOutsToNewOutsPos);
 
@@ -316,7 +323,7 @@ struct RemoveUnusedCycleInGenericOp : public OpRewritePattern<GenericOp> {
     bool hasRemovedCycles = false;
     // Iterate over output operands and remove any unused cycles.
     for (const auto &outputOpOperand :
-         llvm::enumerate(genericOp.getDpsInitOperands())) {
+         llvm::enumerate(genericOp.getDpsInits())) {
 
       // Check that result from out operand is dead.
       Value result = genericOp.getResult(outputOpOperand.index());

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index ae0461965c4785c..d83ec725e082092 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -150,8 +150,8 @@ static LinalgOp fuse(OpBuilder &b, LinalgOp producer,
   // fully dynamic at construction time.
   SmallVector<Type, 4> resultTypes;
   resultTypes.reserve(producer->getNumResults());
-  for (OpOperand *operand : producer.getDpsInitOperands()) {
-    auto tensorType = dyn_cast<RankedTensorType>(operand->get().getType());
+  for (Value operand : producer.getDpsInits()) {
+    auto tensorType = dyn_cast<RankedTensorType>(operand.getType());
     if (!tensorType)
       continue;
     unsigned rank = tensorType.getRank();

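For illustration, a minimal sketch of the value-based accessor this hunk switches to (hypothetical `dpsOp` variable, assumed to implement DestinationStyleOpInterface; not part of the patch). `getDpsInits()` returns a range of `Value`s directly, so no `SmallVector<OpOperand *>` is materialized and no `->get()` indirection is needed:

    // Hypothetical example: iterate init values without going through
    // OpOperand pointers.
    for (Value init : dpsOp.getDpsInits()) {
      // Work directly with the init value, e.g. query its type.
      if (auto tensorType = dyn_cast<RankedTensorType>(init.getType()))
        (void)tensorType.getRank();
    }
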
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
index 2382903bf3785bf..1d9ce4144f998de 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
@@ -55,8 +55,8 @@ FailureOr<GenericOp> mlir::linalg::generalizeNamedOp(RewriterBase &rewriter,
   if (failed(generalizeNamedOpPrecondition(linalgOp)))
     return rewriter.notifyMatchFailure(linalgOp, "preconditions not met");
 
-  SmallVector<Value> inputs = linalgOp.getDpsInputOperands();
-  SmallVector<Value> outputs = linalgOp.getDpsInitOperands();
+  SmallVector<Value> inputs = linalgOp.getDpsInputs();
+  ValueRange outputs = linalgOp.getDpsInits();
   SmallVector<AffineMap> indexingMaps = linalgOp.getIndexingMapsArray();
   SmallVector<utils::IteratorType> iterators = linalgOp.getIteratorTypesArray();
   SmallVector<Type> resultTypes = linalgOp.hasTensorSemantics()

diff --git a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
index 9cf8f7c4e1ca0ae..cc39fe932c24bfc 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
@@ -54,8 +54,9 @@ struct InlineScalarOperands : public OpRewritePattern<GenericOp> {
     if (scalarOperands.empty())
       return failure();
 
-    for (OpOperand *opOperand : genericOp.getDpsInitOperands())
-      newIndexingMaps.emplace_back(genericOp.getMatchingIndexingMap(opOperand));
+    for (OpOperand &opOperand : genericOp.getDpsInitsMutable())
+      newIndexingMaps.emplace_back(
+          genericOp.getMatchingIndexingMap(&opOperand));
 
     Location loc = genericOp->getLoc();
     SmallVector<Value> outputOperands = genericOp.getOutputs();

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 72b684aaa864c7a..79e295b937b9374 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -149,11 +149,12 @@ static void emitScalarImplementation(OpBuilder &b, Location loc,
         b.create<LoadOpTy>(loc, inputOperand->get(), indexing));
   }
   // 1.b. Emit load from output views.
-  for (OpOperand *outputOperand : linalgOp.getDpsInitOperands()) {
+  for (OpOperand &outputOperand : linalgOp.getDpsInitsMutable()) {
     SmallVector<Value> indexing = makeCanonicalAffineApplies(
-        b, loc, linalgOp.getMatchingIndexingMap(outputOperand), allIvsPlusDims);
+        b, loc, linalgOp.getMatchingIndexingMap(&outputOperand),
+        allIvsPlusDims);
     indexedValues.push_back(
-        b.create<LoadOpTy>(loc, outputOperand->get(), indexing));
+        b.create<LoadOpTy>(loc, outputOperand.get(), indexing));
   }
 
   // TODO: When a region inliner exists, use it.
@@ -161,13 +162,13 @@ static void emitScalarImplementation(OpBuilder &b, Location loc,
   // 3. Emit store.
   SmallVector<SmallVector<Value>, 8> indexing;
   SmallVector<Value> outputBuffers;
-  for (OpOperand *outputOperand : linalgOp.getDpsInitOperands()) {
-    if (!isa<MemRefType>(outputOperand->get().getType()))
+  for (OpOperand &outputOperand : linalgOp.getDpsInitsMutable()) {
+    if (!isa<MemRefType>(outputOperand.get().getType()))
       continue;
     indexing.push_back(makeCanonicalAffineApplies(
-        b, loc, linalgOp.getMatchingIndexingMap(outputOperand),
+        b, loc, linalgOp.getMatchingIndexingMap(&outputOperand),
         allIvsPlusDims));
-    outputBuffers.push_back(outputOperand->get());
+    outputBuffers.push_back(outputOperand.get());
   }
   inlineRegionAndEmitStore<LoadOpTy, StoreOpTy>(b, loc, linalgOp, indexedValues,
                                                 indexing, outputBuffers);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
index 8fe745d97ca3dd8..a74a3c2c500406f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
@@ -238,18 +238,18 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
              opToPad.getNumDpsInits() &&
          "expected matching number of results");
   for (auto it :
-       llvm::zip(paddedSubtensorResults, opToPad.getDpsInitOperands())) {
+       llvm::zip(paddedSubtensorResults, opToPad.getDpsInitsMutable())) {
     if (options.copyBackOp == LinalgPaddingOptions::CopyBackOp::LinalgCopy) {
       replacements.push_back(rewriter
                                  .create<linalg::CopyOp>(loc, std::get<0>(it),
-                                                         std::get<1>(it)->get())
+                                                         std::get<1>(it).get())
                                  .getResult(0));
     } else if (options.copyBackOp ==
                LinalgPaddingOptions::CopyBackOp::
                    BufferizationMaterializeInDestination) {
       replacements.push_back(
           rewriter.create<bufferization::MaterializeInDestinationOp>(
-              loc, std::get<0>(it), std::get<1>(it)->get()));
+              loc, std::get<0>(it), std::get<1>(it).get()));
     } else {
       llvm_unreachable("unsupported copy back op");
     }

diff --git a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
index 6c859b6cb70eb52..6559c86c9e0ff50 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
@@ -192,8 +192,7 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReduction(
 
   auto reduction = b.create<GenericOp>(
       loc, op->getResultTypes(), ValueRange({genericOp.getResult(0)}),
-      SmallVector<Value>{op.getDpsInitOperands()}, reductionMaps,
-      reductionIteratorTypes,
+      op.getDpsInits(), reductionMaps, reductionIteratorTypes,
       [reductionOp](OpBuilder &b, Location loc, ValueRange inputs) {
         Operation *clonedReductionOp = b.clone(*reductionOp);
         clonedReductionOp->setOperand(0, inputs[0]);
@@ -308,8 +307,8 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   SmallVector<Operation *> emptyOrAllocTensorOps;
   SmallVector<linalg::FillOp> fillOps;
   fillOps.reserve(op.getNumDpsInits());
-  for (auto it : llvm::zip(op.getDpsInitOperands(), neutralElements)) {
-    Value rankedTensor = std::get<0>(it)->get();
+  for (auto it : llvm::zip(op.getDpsInitsMutable(), neutralElements)) {
+    Value rankedTensor = std::get<0>(it).get();
     auto t = cast<RankedTensorType>(rankedTensor.getType());
     RankedTensorType newT = RankedTensorType::Builder(t).insertDim(
         reductionDimSize / splitFactor, insertSplitDimension);
@@ -345,13 +344,13 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   // TODO: a subset of these may not reduce along reducePos and should be
   // reindexed: k -> k * splitFactor + k', when multi-reduction support is
   // available.
-  for (OpOperand *o : op.getDpsInitOperands())
-    newMaps.push_back(insertParallelDim(op, *o, reductionDimPos,
+  for (OpOperand &o : op.getDpsInitsMutable())
+    newMaps.push_back(insertParallelDim(op, o, reductionDimPos,
                                         reductionDimSize / splitFactor));
 
   // Step 3. Handle operands.
   // Compute the new input tensors.
-  SmallVector<Value> newInputs(op.getDpsInputOperands());
+  SmallVector<Value> newInputs = op.getDpsInputs();
   // Add a single shape-only tensor to carry the dimensions without resorting to
   // more complex inversions.
   newInputs.push_back(b.create<tensor::EmptyOp>(
@@ -380,10 +379,10 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   // TODO: all results can be handled in a single GenericOp, when
   // multi-reduction support is available.
   SmallVector<LinalgOp> results;
-  for (auto it : llvm::zip(genericOp->getResults(), op.getDpsInitOperands(),
-                           combinerOps)) {
+  for (auto it :
+       llvm::zip(genericOp->getResults(), op.getDpsInits(), combinerOps)) {
     Value reindexedOutput = std::get<0>(it);
-    Value originalOutput = std::get<1>(it)->get();
+    Value originalOutput = std::get<1>(it);
     auto originalOutputType = cast<RankedTensorType>(originalOutput.getType());
     Operation *combinerOp = std::get<2>(it);
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 2c6afd4c2e6d99f..472e6fa3ab27b22 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -368,14 +368,14 @@ static FailureOr<ForallTilingResult> tileToForallOpImpl(
     Operation *clonedOp = b.clone(*op.getOperation());
     auto destinationStyleOp = dyn_cast<DestinationStyleOpInterface>(clonedOp);
     if (destinationStyleOp) {
-      for (OpOperand *outOperand : destinationStyleOp.getDpsInitOperands()) {
+      for (OpOperand &outOperand : destinationStyleOp.getDpsInitsMutable()) {
         // Swap tensor inits with the corresponding block argument of the
         // scf.forall op. Memref inits remain as is.
-        if (outOperand->get().getType().isa<TensorType>()) {
-          auto *it = llvm::find(dest, outOperand->get());
+        if (outOperand.get().getType().isa<TensorType>()) {
+          auto *it = llvm::find(dest, outOperand.get());
           assert(it != dest.end() && "could not find destination tensor");
           unsigned destNum = std::distance(dest.begin(), it);
-          outOperand->set(destBbArgs[destNum]);
+          outOperand.set(destBbArgs[destNum]);
         }
       }
     }
@@ -702,8 +702,8 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
     b.setInsertionPoint(forallOp.getTerminator());
 
     SmallVector<Value> tiledDpsInitOperands;
-    for (OpOperand *initOperand : destinationStyleOp.getDpsInitOperands()) {
-      auto *it = llvm::find(dest, initOperand->get());
+    for (Value initOperand : destinationStyleOp.getDpsInits()) {
+      auto *it = llvm::find(dest, initOperand);
       assert(it != dest.end() && "dest operand not found in dest");
       unsigned destNum = std::distance(dest.begin(), it);
       SmallVector<OpFoldResult> strides(numThreads.size(), b.getIndexAttr(1));
@@ -714,7 +714,7 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
       outOffsets[reductionDim] = forallOp.getInductionVars().front();
       // TODO: use SubsetExtractOpInterface once it is available.
       tiledDpsInitOperands.push_back(b.create<tensor::ExtractSliceOp>(
-          loc, cast<RankedTensorType>(initOperand->get().getType()),
+          loc, cast<RankedTensorType>(initOperand.getType()),
           destBbArgs[destNum], outOffsets, sizes, strides));
     }
 
@@ -724,9 +724,9 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
     Operation *clonedOp = b.clone(*op.getOperation());
     b.updateRootInPlace(clonedOp, [&]() {
       for (auto [initOperandPtr, tiledInitValue] : llvm::zip_equal(
-               cast<DestinationStyleOpInterface>(clonedOp).getDpsInitOperands(),
+               cast<DestinationStyleOpInterface>(clonedOp).getDpsInitsMutable(),
                tiledDpsInitOperands)) {
-        initOperandPtr->set(tiledInitValue);
+        initOperandPtr.set(tiledInitValue);
       }
     });
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index cedaa4344a29581..5f566d8b10aef73 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -335,7 +335,7 @@ struct LinalgOpPartialReductionInterface
     }
 
     // Step 1: Extract a slice of the input operands.
-    SmallVector<Value> valuesToTile = linalgOp.getDpsInputOperands();
+    SmallVector<Value> valuesToTile = linalgOp.getDpsInputs();
     SmallVector<Value, 4> tiledOperands = makeTiledShapes(
         b, loc, linalgOp, valuesToTile, offsets, sizes, {}, true);
 
@@ -397,8 +397,7 @@ struct LinalgOpPartialReductionInterface
 
     auto reduction = b.create<GenericOp>(
         loc, op->getResultTypes(), ValueRange({partialReduce[0]}),
-        SmallVector<Value>{linalgOp.getDpsInitOperands()}, reductionMaps,
-        reductionIteratorTypes,
+        linalgOp.getDpsInits(), reductionMaps, reductionIteratorTypes,
         [reductionOp](OpBuilder &b, Location loc, ValueRange inputs) {
           Operation *clonedReductionOp = b.clone(*reductionOp);
           clonedReductionOp->setOperand(0, inputs[0]);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index a2d219f669905e0..49fe937741c77c9 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -554,11 +554,13 @@ FailureOr<PackResult> linalg::pack(RewriterBase &rewriter,
 
   // Step 2. Propagate packing to all LinalgOp operands.
   SmallVector<Value> inputsAndInits, results;
-  for (const auto &operandsList :
-       {linalgOp.getDpsInputOperands(), linalgOp.getDpsInitOperands()}) {
-    for (OpOperand *opOperandPtr : operandsList) {
-      int64_t pos = opOperandPtr->getOperandNumber();
-      Value operand = opOperandPtr->get();
+  SmallVector<OpOperand *> initOperands = llvm::to_vector(llvm::map_range(
+      linalgOp.getDpsInitsMutable(), [](OpOperand &o) { return &o; }));
+  SmallVector<OpOperand *> inputOperands = linalgOp.getDpsInputOperands();
+  for (const auto &operandsList : {inputOperands, initOperands}) {
+    for (OpOperand *opOperand : operandsList) {
+      int64_t pos = opOperand->getOperandNumber();
+      Value operand = opOperand->get();
       SmallVector<int64_t> innerPos =
           listOfPackedOperandsDim.extractPackedDimsForOperand(pos);
       SmallVector<OpFoldResult> innerPackSizes =

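Where a `SmallVector<OpOperand *>` is still required, as in the hunk above, the patch obtains it explicitly from the mutable range. A minimal sketch, assuming the same hypothetical `dpsOp`:

    // Explicitly collect OpOperand pointers from the MutableOperandRange;
    // this replaces the implicit behavior of the removed OpOperandVector.
    SmallVector<OpOperand *> initOperands = llvm::to_vector(llvm::map_range(
        dpsOp.getDpsInitsMutable(), [](OpOperand &o) { return &o; }));
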
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 51a83c35e4cda09..6d8a96a3ad23f3e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1450,12 +1450,12 @@ static LogicalResult reductionPreconditions(LinalgOp op) {
     LDBG("reduction precondition failed: no reduction iterator\n");
     return failure();
   }
-  for (OpOperand *opOperand : op.getDpsInitOperands()) {
-    AffineMap indexingMap = op.getMatchingIndexingMap(opOperand);
+  for (OpOperand &opOperand : op.getDpsInitsMutable()) {
+    AffineMap indexingMap = op.getMatchingIndexingMap(&opOperand);
     if (indexingMap.isPermutation())
       continue;
 
-    Operation *reduceOp = matchLinalgReduction(opOperand);
+    Operation *reduceOp = matchLinalgReduction(&opOperand);
     if (!reduceOp || !getCombinerOpKind(reduceOp)) {
       LDBG("reduction precondition failed: reduction detection failed\n");
       return failure();

diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index eb62dcaba139e6b..f177235acff7238 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -174,8 +174,8 @@ bool isElementwise(LinalgOp op) {
     return false;
 
   // TODO: relax the restrictions on indexing map.
-  for (OpOperand *opOperand : op.getDpsInitOperands()) {
-    if (!op.getMatchingIndexingMap(opOperand).isPermutation())
+  for (OpOperand &opOperand : op.getDpsInitsMutable()) {
+    if (!op.getMatchingIndexingMap(&opOperand).isPermutation())
       return false;
   }
   return hasOnlyScalarElementwiseOp(op->getRegion(0));
@@ -321,10 +321,9 @@ void GenerateLoopNest<scf::ForOp>::doit(
   assert((procInfo.empty() || (procInfo.size() == loopRanges.size())) &&
          "expected as many entries for proc info as number of loops, even if "
          "they are null entries");
-  SmallVector<Value> iterArgInitValues = linalgOp.hasBufferSemantics()
-                                             ? SmallVector<Value>{}
-                                             : linalgOp.getDpsInitOperands();
-
+  SmallVector<Value> iterArgInitValues;
+  if (!linalgOp.hasBufferSemantics())
+    llvm::append_range(iterArgInitValues, linalgOp.getDpsInits());
   SmallVector<Value, 4> lbs, ubs, steps;
   unpackRanges(b, loc, loopRanges, lbs, ubs, steps);
   LoopNest loopNest = mlir::scf::buildLoopNest(
@@ -334,7 +333,7 @@ void GenerateLoopNest<scf::ForOp>::doit(
                "expect the number of output tensors and iter args to match");
         SmallVector<Value> operandValuesToUse = linalgOp->getOperands();
         if (!iterArgs.empty()) {
-          operandValuesToUse = linalgOp.getDpsInputOperands();
+          operandValuesToUse = linalgOp.getDpsInputs();
           operandValuesToUse.append(iterArgs.begin(), iterArgs.end());
         }
         return bodyBuilderFn(b, loc, ivs, operandValuesToUse);
@@ -362,9 +361,9 @@ void GenerateLoopNest<AffineForOp>::doit(
                                   ValueRange)>
         bodyBuilderFn,
     ArrayRef<linalg::ProcInfo> /*procInfo*/) {
-  SmallVector<Value> iterArgInitValues = linalgOp.hasBufferSemantics()
-                                             ? SmallVector<Value>{}
-                                             : linalgOp.getDpsInitOperands();
+  SmallVector<Value> iterArgInitValues;
+  if (!linalgOp.hasBufferSemantics())
+    llvm::append_range(iterArgInitValues, linalgOp.getDpsInits());
   assert(iterArgInitValues.empty() && "unexpected AffineForOp init values");
   SmallVector<Value, 4> lbs, ubs, steps;
   unpackRanges(b, loc, loopRanges, lbs, ubs, steps);
@@ -529,9 +528,9 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
                                   ValueRange)>
         bodyBuilderFn,
     ArrayRef<linalg::ProcInfo> procInfo) {
-  SmallVector<Value> iterArgInitValues = linalgOp.hasBufferSemantics()
-                                             ? SmallVector<Value>{}
-                                             : linalgOp.getDpsInitOperands();
+  SmallVector<Value> iterArgInitValues;
+  if (!linalgOp.hasBufferSemantics())
+    llvm::append_range(iterArgInitValues, linalgOp.getDpsInits());
   assert(iterArgInitValues.empty() && "unexpected ParallelOp init values");
   // This function may be passed more iterator types than ranges.
   assert(iteratorTypes.size() >= loopRanges.size() &&
@@ -742,8 +741,8 @@ SmallVector<Type> getTensorOutputTypes(LinalgOp op, ValueRange operands) {
   if (op.hasBufferSemantics())
     return {};
   return llvm::to_vector(
-      llvm::map_range(op.getDpsInitOperands(), [&](OpOperand *opOperand) {
-        return operands[opOperand->getOperandNumber()].getType();
+      llvm::map_range(op.getDpsInitsMutable(), [&](OpOperand &opOperand) {
+        return operands[opOperand.getOperandNumber()].getType();
       }));
 }
 
@@ -756,10 +755,10 @@ SmallVector<Value> insertSlicesBack(OpBuilder &builder, Location loc,
   tensorResults.reserve(results.size());
   // Insert a insert_slice for each output tensor.
   unsigned resultIdx = 0;
-  for (OpOperand *opOperand : op.getDpsInitOperands()) {
+  for (OpOperand &opOperand : op.getDpsInitsMutable()) {
     // TODO: use an interface/adaptor to avoid leaking position in
     // `tiledOperands`.
-    Value outputTensor = operands[opOperand->getOperandNumber()];
+    Value outputTensor = operands[opOperand.getOperandNumber()];
     if (auto sliceOp = outputTensor.getDefiningOp<tensor::ExtractSliceOp>()) {
       Value inserted = builder.create<tensor::InsertSliceOp>(
           loc, sliceOp.getSource().getType(), results[resultIdx],

diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
index 6cfba3fef15ebda..ab59eac2ac4d6f8 100644
--- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -255,7 +255,8 @@ yieldTiledValues(RewriterBase &rewriter, ArrayRef<Value> initValues,
   for (auto tiledOp : tilingResult.tiledOps) {
     if (auto dstOp = dyn_cast<DestinationStyleOpInterface>(tiledOp)) {
       auto innerMostLoop = loops.back();
-      SmallVector<Value> tiledOpDestinationTensors = dstOp.getDpsInitOperands();
+      SmallVector<Value> tiledOpDestinationTensors =
+          llvm::to_vector(dstOp.getDpsInits());
       updateDestinationOperandsForTiledOp(rewriter, tiledOpDestinationTensors,
                                           innerMostLoop.getRegionIterArgs());
     }
@@ -447,7 +448,7 @@ mlir::scf::tileReductionUsingScf(RewriterBase &b,
 
   auto dstOp = cast<DestinationStyleOpInterface>(parallelOp);
   auto innerMostLoop = loops.back();
-  SmallVector<Value> destinationTensors = dstOp.getDpsInitOperands();
+  SmallVector<Value> destinationTensors = llvm::to_vector(dstOp.getDpsInits());
   assert(destinationTensors.size() ==
              innerMostLoop.getRegionIterArgs().size() &&
          "unexpected number of outputs");

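Because the removed `OpOperandVector` had an implicit conversion to `SmallVector<Value>`, call sites that need an owning vector now convert explicitly, as the two hunks above do. A sketch under the same hypothetical `dpsOp` assumption:

    // getDpsInits() returns a lightweight operand range; copy it out when an
    // owning SmallVector<Value> is required.
    SmallVector<Value> destinationTensors = llvm::to_vector(dpsOp.getDpsInits());
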
diff --git a/mlir/lib/Interfaces/DestinationStyleOpInterface.cpp b/mlir/lib/Interfaces/DestinationStyleOpInterface.cpp
index f344ea656b24736..4e5ef66887cadf8 100644
--- a/mlir/lib/Interfaces/DestinationStyleOpInterface.cpp
+++ b/mlir/lib/Interfaces/DestinationStyleOpInterface.cpp
@@ -14,14 +14,6 @@ namespace mlir {
 #include "mlir/Interfaces/DestinationStyleOpInterface.cpp.inc"
 } // namespace mlir
 
-OpOperandVector::operator SmallVector<Value>() {
-  SmallVector<Value> result;
-  result.reserve(this->size());
-  llvm::transform(*this, std::back_inserter(result),
-                  [](OpOperand *opOperand) { return opOperand->get(); });
-  return result;
-}
-
 namespace {
 size_t getNumTensorResults(Operation *op) {
   size_t numTensorResults = 0;
@@ -39,13 +31,13 @@ LogicalResult detail::verifyDestinationStyleOpInterface(Operation *op) {
       cast<DestinationStyleOpInterface>(op);
 
   SmallVector<OpOperand *> outputTensorOperands;
-  for (OpOperand *operand : dstStyleOp.getDpsInitOperands()) {
-    Type type = operand->get().getType();
+  for (OpOperand &operand : dstStyleOp.getDpsInitsMutable()) {
+    Type type = operand.get().getType();
     if (isa<RankedTensorType>(type)) {
-      outputTensorOperands.push_back(operand);
+      outputTensorOperands.push_back(&operand);
     } else if (!isa<MemRefType>(type)) {
       return op->emitOpError("expected that operand #")
-             << operand->getOperandNumber()
+             << operand.getOperandNumber()
              << " is a ranked tensor or a ranked memref";
     }
   }

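The verifier change above shows the core pattern used throughout this patch: iterate `OpOperand &` via `getDpsInitsMutable()` and take its address only where an `OpOperand *` is needed. A minimal sketch of an in-place init update, with `dpsOp` and `replacement` as hypothetical placeholders (a `Value` of matching type), not names from this patch:

    // Mutable iteration allows rewriting the operand in place.
    for (OpOperand &init : dpsOp.getDpsInitsMutable()) {
      if (isa<TensorType>(init.get().getType()))
        init.set(replacement); // update the operand through the OpOperand
    }
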
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
index 167ed8055206796..e41481a9e51364e 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
@@ -26,7 +26,7 @@ static void addOperands(Operation *op, SetVector<Value> &operandSet) {
     return;
   TypeSwitch<Operation *, void>(op)
       .Case<linalg::LinalgOp>([&](linalg::LinalgOp linalgOp) {
-        SmallVector<Value> inputOperands{linalgOp.getDpsInputOperands()};
+        SmallVector<Value> inputOperands = linalgOp.getDpsInputs();
         operandSet.insert(inputOperands.begin(), inputOperands.end());
       })
       .Default([&](Operation *operation) {

diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 354a43c244e3bbe..6887f151eef7695 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -2350,9 +2350,8 @@ def TestDestinationStyleOp :
   }];
 
   let extraClassDeclaration = [{
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t numOperands = this->getNumOperands();
-      return {numOperands - getOutputs().size(), numOperands};
+    mlir::MutableOperandRange getDpsInitsMutable() {
+      return getOutputsMutable();
     }
   }];
 }
@@ -2412,9 +2411,8 @@ def TestLinalgConvOp :
       return "";
     }
 
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
+    mlir::MutableOperandRange getDpsInitsMutable() {
+      return getOutputsMutable();
     }
   }];
 }
@@ -2474,9 +2472,8 @@ def TestLinalgFillOp :
       return "";
     }
 
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
+    mlir::MutableOperandRange getDpsInitsMutable() {
+      return getOutputsMutable();
     }
   }];
 }

diff --git a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
index 29075cb78a1cc7b..ab7b86125f693bc 100644
--- a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
+++ b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
@@ -82,9 +82,8 @@ structured_op: !LinalgStructuredOpConfig
 #       ODS:    buildStructuredOp($_builder, $_state, resultTensorTypes,
 #  ODS-NEXT:      attributes, Test1Op::getRegionBuilder())
 
-#       ODS:    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-#  ODS-NEXT:      int64_t getNumOperands = this->getNumOperands();
-#  ODS-NEXT:      return {getNumOperands - getOutputs().size(), getNumOperands};
+#       ODS:    MutableOperandRange getDpsInitsMutable() {
+#  ODS-NEXT:      return getOutputsMutable()
 #  ODS-NEXT:    }
 
 # IMPL-LABEL:  void Test1Op::regionBuilder(ImplicitLocOpBuilder &b,

diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
index 61cb5537f1df6de..664167e4f6c3471 100644
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
@@ -563,9 +563,8 @@ def {0} : LinalgStructuredBase_Op<"{1}", !listconcat([AttrSizedOperandSegments],
         return regionBuilder;
       }
 
-      std::pair<int64_t, int64_t> getDpsInitsPositionRange() {{
-        int64_t getNumOperands = this->getNumOperands();
-        return {{getNumOperands - getOutputs().size(), getNumOperands};
+      ::mlir::MutableOperandRange getDpsInitsMutable() {{
+        return getOutputsMutable();
       }
 
       // Generic methods.
@@ -661,7 +660,7 @@ void {0}::getEffects(SmallVectorImpl<
     SideEffects::EffectInstance<MemoryEffects::Effect> >&effects) {{
       if (hasTensorSemantics()) return;
       getGenericEffectsImpl(effects,
-        getOperation()->getResults(), getDpsInputOperands(), getDpsInitOperands());
+        getOperation()->getResults(), getDpsInputs(), getDpsInits());
 }
 )FMT";
 
