[Mlir-commits] [mlir] cfb72fd - [mlir] Switch arith, llvm, std & shape dialects to accessors prefixed both form.

Jacques Pienaar llvmlistbot at llvm.org
Sun Oct 24 18:36:48 PDT 2021


Author: Jacques Pienaar
Date: 2021-10-24T18:36:33-07:00
New Revision: cfb72fd3a07064ee3599d5a4b7853b6247feb47f

URL: https://github.com/llvm/llvm-project/commit/cfb72fd3a07064ee3599d5a4b7853b6247feb47f
DIFF: https://github.com/llvm/llvm-project/commit/cfb72fd3a07064ee3599d5a4b7853b6247feb47f.diff

LOG: [mlir] Switch arith, llvm, std & shape dialects to accessors prefixed both form.

Following
https://llvm.discourse.group/t/psa-ods-generated-accessors-will-change-to-have-a-get-prefix-update-you-apis/4476,
this flips these dialects to the _Both prefixed form, so the generated
accessors gain a `get` prefix while the raw-name forms remain available.
Where the existing convenience methods were already in use, this was mostly
possible without breaking changes.

(https://github.com/jpienaar/llvm-project/blob/main/clang-tools-extra/clang-tidy/misc/AddGetterCheck.cpp
was used to migrate the callers after flipping, using the output from
Operator.cpp)
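
For illustration only (not part of the patch): a minimal sketch of what the
_Both mode means for callers, assuming an arith.addi op. With the dialect's
emitAccessorPrefix set to kEmitAccessorPrefix_Both, ODS generates both the
legacy raw-name accessor and the new get-prefixed accessor, so either
spelling compiles during the transition. The helper function below is
hypothetical.

  #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
  #include <cassert>

  // Both spellings resolve to the same generated accessor pair in _Both mode.
  static void useBothAccessorForms(mlir::arith::AddIOp op) {
    mlir::Value viaRawName = op.lhs();      // legacy form, still generated
    mlir::Value viaPrefixed = op.getLhs();  // new prefixed form (eventual target)
    assert(viaRawName == viaPrefixed && "both accessors return the same operand");
  }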

Differential Revision: https://reviews.llvm.org/D112383

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h
    mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticBase.td
    mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td
    mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
    mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
    mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
    mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
    mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp
    mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
    mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
    mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
    mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
    mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
    mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
    mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
    mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
    mlir/lib/Conversion/ShapeToStandard/ConvertShapeConstraints.cpp
    mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
    mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
    mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp
    mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
    mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
    mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
    mlir/lib/Dialect/Arithmetic/Transforms/Bufferize.cpp
    mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp
    mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
    mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
    mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
    mlir/lib/Dialect/Linalg/Utils/Utils.cpp
    mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
    mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
    mlir/lib/Dialect/SCF/SCF.cpp
    mlir/lib/Dialect/Shape/IR/Shape.cpp
    mlir/lib/Dialect/Shape/Transforms/ShapeToShapeLowering.cpp
    mlir/lib/Dialect/Shape/Transforms/StructuralTypeConversions.cpp
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/lib/Dialect/StandardOps/IR/Ops.cpp
    mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp
    mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp
    mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp
    mlir/lib/Dialect/StandardOps/Transforms/TensorConstantBufferize.cpp
    mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
    mlir/lib/Dialect/Vector/VectorOps.cpp
    mlir/lib/Dialect/Vector/VectorUtils.cpp
    mlir/lib/TableGen/Operator.cpp
    mlir/lib/Target/Cpp/TranslateToCpp.cpp
    mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
    mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
    mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
    mlir/lib/Transforms/BufferResultsToOutParams.cpp
    mlir/lib/Transforms/NormalizeMemRefs.cpp

Removed: 
    


################################################################################
diff  --git a/mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h b/mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h
index 3a94c15692b12..87392a8f20a78 100644
--- a/mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h
+++ b/mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h
@@ -52,7 +52,7 @@ class ConstantIntOp : public arith::ConstantOp {
                     Type type);
 
   inline int64_t value() {
-    return arith::ConstantOp::value().cast<IntegerAttr>().getInt();
+    return arith::ConstantOp::getValue().cast<IntegerAttr>().getInt();
   }
 
   static bool classof(Operation *op);
@@ -68,7 +68,7 @@ class ConstantFloatOp : public arith::ConstantOp {
                     const APFloat &value, FloatType type);
 
   inline APFloat value() {
-    return arith::ConstantOp::value().cast<FloatAttr>().getValue();
+    return arith::ConstantOp::getValue().cast<FloatAttr>().getValue();
   }
 
   static bool classof(Operation *op);
@@ -83,7 +83,7 @@ class ConstantIndexOp : public arith::ConstantOp {
   static void build(OpBuilder &builder, OperationState &result, int64_t value);
 
   inline int64_t value() {
-    return arith::ConstantOp::value().cast<IntegerAttr>().getInt();
+    return arith::ConstantOp::getValue().cast<IntegerAttr>().getInt();
   }
 
   static bool classof(Operation *op);

diff  --git a/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticBase.td b/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticBase.td
index f22b427bf0e30..5c6b9a52cae37 100644
--- a/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticBase.td
+++ b/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticBase.td
@@ -22,6 +22,7 @@ def Arithmetic_Dialect : Dialect {
   }];
 
   let hasConstantMaterializer = 1;
+  let emitAccessorPrefix = kEmitAccessorPrefix_Both;
 }
 
 // The predicate indicates the type of the comparison to perform:

diff  --git a/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td b/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td
index 4300a0a3ccd6b..b3ef7ed02b038 100644
--- a/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td
+++ b/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td
@@ -956,13 +956,7 @@ def Arith_CmpIOp : Arith_CompareOp<"cmpi"> {
   ];
 
   let extraClassDeclaration = [{
-    static StringRef getPredicateAttrName() { return "predicate"; }
     static arith::CmpIPredicate getPredicateByName(StringRef name);
-
-    arith::CmpIPredicate getPredicate() {
-      return (arith::CmpIPredicate) (*this)->getAttrOfType<IntegerAttr>(
-          getPredicateAttrName()).getInt();
-    }
   }];
 
   let hasFolder = 1;
@@ -1012,13 +1006,7 @@ def Arith_CmpFOp : Arith_CompareOp<"cmpf"> {
   ];
 
   let extraClassDeclaration = [{
-    static StringRef getPredicateAttrName() { return "predicate"; }
     static arith::CmpFPredicate getPredicateByName(StringRef name);
-
-    arith::CmpFPredicate getPredicate() {
-      return (arith::CmpFPredicate) (*this)->getAttrOfType<IntegerAttr>(
-          getPredicateAttrName()).getInt();
-    }
   }];
 
   let hasFolder = 1;

diff  --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
index 71d418e1e9890..d285e7ae3ce8b 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
@@ -47,6 +47,8 @@ def LLVM_Dialect : Dialect {
     /// Name of the target triple attribute.
     static StringRef getTargetTripleAttrName() { return "llvm.target_triple"; }
   }];
+
+  let emitAccessorPrefix = kEmitAccessorPrefix_Both;
 }
 
 //===----------------------------------------------------------------------===//

diff  --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index 99d0a1fc69e23..29c7446740a3c 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -1128,7 +1128,7 @@ def LLVM_GlobalOp : LLVM_Op<"mlir.global",
   let extraClassDeclaration = [{
     /// Return the LLVM type of the global.
     Type getType() {
-      return global_type();
+      return getGlobalType();
     }
     /// Return the initializer attribute if it exists, or a null attribute.
     Attribute getValueOrNull() {

diff  --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
index 3a3d995402735..66b77e96a2ad5 100644
--- a/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
+++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
@@ -39,6 +39,7 @@ def ShapeDialect : Dialect {
 
   let hasConstantMaterializer = 1;
   let hasOperationAttrVerify = 1;
+  let emitAccessorPrefix = kEmitAccessorPrefix_Both;
 }
 
 def Shape_ShapeType : DialectType<ShapeDialect,

diff  --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
index ba8f03e899cd1..35d6e161e359c 100644
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
@@ -27,6 +27,7 @@ def StandardOps_Dialect : Dialect {
   let cppNamespace = "::mlir";
   let dependentDialects = ["arith::ArithmeticDialect"];
   let hasConstantMaterializer = 1;
+  let emitAccessorPrefix = kEmitAccessorPrefix_Both;
 }
 
 // Base class for Standard dialect ops.
@@ -306,7 +307,7 @@ def GenericAtomicRMWOp : Std_Op<"generic_atomic_rmw", [
 
     // The value stored in memref[ivs].
     Value getCurrentValue() {
-      return body().getArgument(0);
+      return getRegion().getArgument(0);
     }
     MemRefType getMemRefType() {
       return memref().getType().cast<MemRefType>();
@@ -366,7 +367,6 @@ def BranchOp : Std_Op<"br",
   let verifier = ?;
 
   let extraClassDeclaration = [{
-    Block *getDest();
     void setDest(Block *block);
 
     /// Erase the operand at 'index' from the operand list.
@@ -426,8 +426,6 @@ def CallOp : Std_Op<"call",
     }]>];
 
   let extraClassDeclaration = [{
-    StringRef getCallee() { return callee(); }
-    StringAttr getCalleeAttr() { return calleeAttr().getAttr(); }
     FunctionType getCalleeType();
 
     /// Get the argument operands to the called function.
@@ -457,7 +455,7 @@ def CallOp : Std_Op<"call",
 def CallIndirectOp : Std_Op<"call_indirect", [
       CallOpInterface,
       TypesMatchWith<"callee input types match argument types",
-                     "callee", "operands",
+                     "callee", "callee_operands",
                      "$_self.cast<FunctionType>().getInputs()">,
       TypesMatchWith<"callee result types match result types",
                      "callee", "results",
@@ -481,7 +479,8 @@ def CallIndirectOp : Std_Op<"call_indirect", [
     ```
   }];
 
-  let arguments = (ins FunctionType:$callee, Variadic<AnyType>:$operands);
+  let arguments = (ins FunctionType:$callee,
+                       Variadic<AnyType>:$callee_operands);
   let results = (outs Variadic<AnyType>:$results);
 
   let builders = [
@@ -492,7 +491,8 @@ def CallIndirectOp : Std_Op<"call_indirect", [
     }]>];
 
   let extraClassDeclaration = [{
-    Value getCallee() { return getOperand(0); }
+    // TODO: Remove once migrated callers.
+    ValueRange operands() { return getCalleeOperands(); }
 
     /// Get the argument operands to the called function.
     operand_range getArgOperands() {
@@ -509,7 +509,8 @@ def CallIndirectOp : Std_Op<"call_indirect", [
   let verifier = ?;
   let hasCanonicalizeMethod = 1;
 
-  let assemblyFormat = "$callee `(` $operands `)` attr-dict `:` type($callee)";
+  let assemblyFormat =
+    "$callee `(` $callee_operands `)` attr-dict `:` type($callee)";
 }
 
 //===----------------------------------------------------------------------===//
@@ -573,19 +574,6 @@ def CondBranchOp : Std_Op<"cond_br",
     // These are the indices into the dests list.
     enum { trueIndex = 0, falseIndex = 1 };
 
-    // The condition operand is the first operand in the list.
-    Value getCondition() { return getOperand(0); }
-
-    /// Return the destination if the condition is true.
-    Block *getTrueDest() {
-      return getSuccessor(trueIndex);
-    }
-
-    /// Return the destination if the condition is false.
-    Block *getFalseDest() {
-      return getSuccessor(falseIndex);
-    }
-
     // Accessors for operands to the 'true' destination.
     Value getTrueOperand(unsigned idx) {
       assert(idx < getNumTrueOperands());
@@ -597,8 +585,6 @@ def CondBranchOp : Std_Op<"cond_br",
       setOperand(getTrueDestOperandIndex() + idx, value);
     }
 
-    operand_range getTrueOperands() { return trueDestOperands(); }
-
     unsigned getNumTrueOperands()  { return getTrueOperands().size(); }
 
     /// Erase the operand at 'index' from the true operand list.
@@ -616,7 +602,8 @@ def CondBranchOp : Std_Op<"cond_br",
       setOperand(getFalseDestOperandIndex() + idx, value);
     }
 
-    operand_range getFalseOperands() { return falseDestOperands(); }
+    operand_range getTrueOperands() { return getTrueDestOperands(); }
+    operand_range getFalseOperands() { return getFalseDestOperands(); }
 
     unsigned getNumFalseOperands() { return getFalseOperands().size(); }
 
@@ -697,8 +684,6 @@ def ConstantOp : Std_Op<"constant",
   ];
 
   let extraClassDeclaration = [{
-    Attribute getValue() { return (*this)->getAttr("value"); }
-
     /// Returns true if a constant operation can be built with the given value
     /// and result type.
     static bool isBuildableWith(Attribute value, Type type);
@@ -974,12 +959,6 @@ def SelectOp : Std_Op<"select", [NoSideEffect,
       $_state.addTypes(trueValue.getType());
     }]>];
 
-  let extraClassDeclaration = [{
-      Value getCondition() { return condition(); }
-      Value getTrueValue() { return true_value(); }
-      Value getFalseValue() { return false_value(); }
-  }];
-
   let hasCanonicalizer = 1;
   let hasFolder = 1;
 }

diff  --git a/mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp b/mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp
index 2dc37c640f53e..07078c77626e1 100644
--- a/mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp
+++ b/mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp
@@ -134,16 +134,16 @@ LogicalResult IndexCastOpLowering::matchAndRewrite(
       typeConverter->convertType(getElementTypeOrSelf(op.getResult()))
           .cast<IntegerType>();
   auto sourceElementType =
-      getElementTypeOrSelf(adaptor.in()).cast<IntegerType>();
+      getElementTypeOrSelf(adaptor.getIn()).cast<IntegerType>();
   unsigned targetBits = targetElementType.getWidth();
   unsigned sourceBits = sourceElementType.getWidth();
 
   if (targetBits == sourceBits)
-    rewriter.replaceOp(op, adaptor.in());
+    rewriter.replaceOp(op, adaptor.getIn());
   else if (targetBits < sourceBits)
-    rewriter.replaceOpWithNewOp<LLVM::TruncOp>(op, targetType, adaptor.in());
+    rewriter.replaceOpWithNewOp<LLVM::TruncOp>(op, targetType, adaptor.getIn());
   else
-    rewriter.replaceOpWithNewOp<LLVM::SExtOp>(op, targetType, adaptor.in());
+    rewriter.replaceOpWithNewOp<LLVM::SExtOp>(op, targetType, adaptor.getIn());
   return success();
 }
 
@@ -161,7 +161,7 @@ static LLVMPredType convertCmpPredicate(PredType pred) {
 LogicalResult
 CmpIOpLowering::matchAndRewrite(arith::CmpIOp op, OpAdaptor adaptor,
                                 ConversionPatternRewriter &rewriter) const {
-  auto operandType = adaptor.lhs().getType();
+  auto operandType = adaptor.getLhs().getType();
   auto resultType = op.getResult().getType();
 
   // Handle the scalar and 1D vector cases.
@@ -169,7 +169,7 @@ CmpIOpLowering::matchAndRewrite(arith::CmpIOp op, OpAdaptor adaptor,
     rewriter.replaceOpWithNewOp<LLVM::ICmpOp>(
         op, typeConverter->convertType(resultType),
         convertCmpPredicate<LLVM::ICmpPredicate>(op.getPredicate()),
-        adaptor.lhs(), adaptor.rhs());
+        adaptor.getLhs(), adaptor.getRhs());
     return success();
   }
 
@@ -184,7 +184,7 @@ CmpIOpLowering::matchAndRewrite(arith::CmpIOp op, OpAdaptor adaptor,
         return rewriter.create<LLVM::ICmpOp>(
             op.getLoc(), llvm1DVectorTy,
             convertCmpPredicate<LLVM::ICmpPredicate>(op.getPredicate()),
-            adaptor.lhs(), adaptor.rhs());
+            adaptor.getLhs(), adaptor.getRhs());
       },
       rewriter);
 
@@ -198,7 +198,7 @@ CmpIOpLowering::matchAndRewrite(arith::CmpIOp op, OpAdaptor adaptor,
 LogicalResult
 CmpFOpLowering::matchAndRewrite(arith::CmpFOp op, OpAdaptor adaptor,
                                 ConversionPatternRewriter &rewriter) const {
-  auto operandType = adaptor.lhs().getType();
+  auto operandType = adaptor.getLhs().getType();
   auto resultType = op.getResult().getType();
 
   // Handle the scalar and 1D vector cases.
@@ -206,7 +206,7 @@ CmpFOpLowering::matchAndRewrite(arith::CmpFOp op, OpAdaptor adaptor,
     rewriter.replaceOpWithNewOp<LLVM::FCmpOp>(
         op, typeConverter->convertType(resultType),
         convertCmpPredicate<LLVM::FCmpPredicate>(op.getPredicate()),
-        adaptor.lhs(), adaptor.rhs());
+        adaptor.getLhs(), adaptor.getRhs());
     return success();
   }
 
@@ -221,7 +221,7 @@ CmpFOpLowering::matchAndRewrite(arith::CmpFOp op, OpAdaptor adaptor,
         return rewriter.create<LLVM::FCmpOp>(
             op.getLoc(), llvm1DVectorTy,
             convertCmpPredicate<LLVM::FCmpPredicate>(op.getPredicate()),
-            adaptor.lhs(), adaptor.rhs());
+            adaptor.getLhs(), adaptor.getRhs());
       },
       rewriter);
 }

diff  --git a/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp b/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
index 8e4927d32348f..d1eed3231d6f4 100644
--- a/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
+++ b/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
@@ -274,7 +274,7 @@ LogicalResult ConstantCompositeOpPattern::matchAndRewrite(
   if (!dstType)
     return failure();
 
-  auto dstElementsAttr = constOp.value().dyn_cast<DenseElementsAttr>();
+  auto dstElementsAttr = constOp.getValue().dyn_cast<DenseElementsAttr>();
   ShapedType dstAttrType = dstElementsAttr.getType();
   if (!dstElementsAttr)
     return failure();
@@ -358,7 +358,7 @@ LogicalResult ConstantScalarOpPattern::matchAndRewrite(
 
   // Floating-point types.
   if (srcType.isa<FloatType>()) {
-    auto srcAttr = constOp.value().cast<FloatAttr>();
+    auto srcAttr = constOp.getValue().cast<FloatAttr>();
     auto dstAttr = srcAttr;
 
     // Floating-point types not supported in the target environment are all
@@ -377,7 +377,7 @@ LogicalResult ConstantScalarOpPattern::matchAndRewrite(
   if (srcType.isInteger(1)) {
     // arith.constant can use 0/1 instead of true/false for i1 values. We need
     // to handle that here.
-    auto dstAttr = convertBoolAttr(constOp.value(), rewriter);
+    auto dstAttr = convertBoolAttr(constOp.getValue(), rewriter);
     if (!dstAttr)
       return failure();
     rewriter.replaceOpWithNewOp<spirv::ConstantOp>(constOp, dstType, dstAttr);
@@ -386,7 +386,7 @@ LogicalResult ConstantScalarOpPattern::matchAndRewrite(
 
   // IndexType or IntegerType. Index values are converted to 32-bit integer
   // values when converting to SPIR-V.
-  auto srcAttr = constOp.value().cast<IntegerAttr>();
+  auto srcAttr = constOp.getValue().cast<IntegerAttr>();
   auto dstAttr =
       convertIntegerAttr(srcAttr, dstType.cast<IntegerType>(), rewriter);
   if (!dstAttr)
@@ -604,7 +604,7 @@ LogicalResult TypeCastingOpPattern<Op, SPIRVOp>::matchAndRewrite(
 LogicalResult CmpIOpBooleanPattern::matchAndRewrite(
     arith::CmpIOp op, OpAdaptor adaptor,
     ConversionPatternRewriter &rewriter) const {
-  Type operandType = op.lhs().getType();
+  Type operandType = op.getLhs().getType();
   if (!isBoolScalarOrVector(operandType))
     return failure();
 
@@ -631,7 +631,7 @@ LogicalResult CmpIOpBooleanPattern::matchAndRewrite(
 LogicalResult
 CmpIOpPattern::matchAndRewrite(arith::CmpIOp op, OpAdaptor adaptor,
                                ConversionPatternRewriter &rewriter) const {
-  Type operandType = op.lhs().getType();
+  Type operandType = op.getLhs().getType();
   if (isBoolScalarOrVector(operandType))
     return failure();
 
@@ -708,14 +708,14 @@ LogicalResult CmpFOpNanKernelPattern::matchAndRewrite(
     arith::CmpFOp op, OpAdaptor adaptor,
     ConversionPatternRewriter &rewriter) const {
   if (op.getPredicate() == arith::CmpFPredicate::ORD) {
-    rewriter.replaceOpWithNewOp<spirv::OrderedOp>(op, adaptor.lhs(),
-                                                  adaptor.rhs());
+    rewriter.replaceOpWithNewOp<spirv::OrderedOp>(op, adaptor.getLhs(),
+                                                  adaptor.getRhs());
     return success();
   }
 
   if (op.getPredicate() == arith::CmpFPredicate::UNO) {
-    rewriter.replaceOpWithNewOp<spirv::UnorderedOp>(op, adaptor.lhs(),
-                                                    adaptor.rhs());
+    rewriter.replaceOpWithNewOp<spirv::UnorderedOp>(op, adaptor.getLhs(),
+                                                    adaptor.getRhs());
     return success();
   }
 
@@ -735,8 +735,8 @@ LogicalResult CmpFOpNanNonePattern::matchAndRewrite(
 
   Location loc = op.getLoc();
 
-  Value lhsIsNan = rewriter.create<spirv::IsNanOp>(loc, adaptor.lhs());
-  Value rhsIsNan = rewriter.create<spirv::IsNanOp>(loc, adaptor.rhs());
+  Value lhsIsNan = rewriter.create<spirv::IsNanOp>(loc, adaptor.getLhs());
+  Value rhsIsNan = rewriter.create<spirv::IsNanOp>(loc, adaptor.getRhs());
 
   Value replace = rewriter.create<spirv::LogicalOrOp>(loc, lhsIsNan, rhsIsNan);
   if (op.getPredicate() == arith::CmpFPredicate::ORD)

diff  --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
index faa3028947ce6..f46dd4239220c 100644
--- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
+++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
@@ -743,7 +743,7 @@ class RuntimeAwaitAndResumeOpLowering
         op->getLoc(), LLVM::LLVMPointerType::get(resumeFnTy), kResume);
 
     rewriter.create<CallOp>(op->getLoc(), apiFuncName, TypeRange(),
-                            ValueRange({operand, handle, resumePtr.res()}));
+                            ValueRange({operand, handle, resumePtr.getRes()}));
     rewriter.eraseOp(op);
 
     return success();
@@ -771,8 +771,8 @@ class RuntimeResumeOpLowering : public OpConversionPattern<RuntimeResumeOp> {
 
     // Call async runtime API to execute a coroutine in the managed thread.
     auto coroHdl = adaptor.handle();
-    rewriter.replaceOpWithNewOp<CallOp>(op, TypeRange(), kExecute,
-                                        ValueRange({coroHdl, resumePtr.res()}));
+    rewriter.replaceOpWithNewOp<CallOp>(
+        op, TypeRange(), kExecute, ValueRange({coroHdl, resumePtr.getRes()}));
 
     return success();
   }

diff  --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
index c8fc7b2346496..27be2cfe3ad5c 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
@@ -93,7 +93,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
       auto elementType =
           global.getType().cast<LLVM::LLVMArrayType>().getElementType();
       Value memory = rewriter.create<LLVM::GEPOp>(
-          loc, LLVM::LLVMPointerType::get(elementType, global.addr_space()),
+          loc, LLVM::LLVMPointerType::get(elementType, global.getAddrSpace()),
           address, ArrayRef<Value>{zero, zero});
 
       // Build a memref descriptor pointing to the buffer to plug with the

diff  --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
index eacbc511fbab7..757f3828bdc7d 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
@@ -520,7 +520,7 @@ LogicalResult ConvertAsyncYieldToGpuRuntimeCallPattern::matchAndRewrite(
 static bool isDefinedByCallTo(Value value, StringRef functionName) {
   assert(value.getType().isa<LLVM::LLVMPointerType>());
   if (auto defOp = value.getDefiningOp<LLVM::CallOp>())
-    return defOp.callee()->equals(functionName);
+    return defOp.getCallee()->equals(functionName);
   return false;
 }
 

diff  --git a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
index da3e490e44db5..200fc3e21149c 100644
--- a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
+++ b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
@@ -103,15 +103,16 @@ class VulkanLaunchFuncToVulkanCallsPass
 
   /// Checks whether the given LLVM::CallOp is a vulkan launch call op.
   bool isVulkanLaunchCallOp(LLVM::CallOp callOp) {
-    return (callOp.callee() && callOp.callee().getValue() == kVulkanLaunch &&
+    return (callOp.getCallee() &&
+            callOp.getCallee().getValue() == kVulkanLaunch &&
             callOp.getNumOperands() >= kVulkanLaunchNumConfigOperands);
   }
 
   /// Checks whether the given LLVM::CallOp is a "ci_face" vulkan launch call
   /// op.
   bool isCInterfaceVulkanLaunchCallOp(LLVM::CallOp callOp) {
-    return (callOp.callee() &&
-            callOp.callee().getValue() == kCInterfaceVulkanLaunch &&
+    return (callOp.getCallee() &&
+            callOp.getCallee().getValue() == kCInterfaceVulkanLaunch &&
             callOp.getNumOperands() >= kVulkanLaunchNumConfigOperands);
   }
 

diff  --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index 6188e8b571123..46c689d9b1775 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -377,7 +377,10 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<memref::DimOp> {
       return idx;
 
     if (auto constantOp = dimOp.index().getDefiningOp<LLVM::ConstantOp>())
-      return constantOp.value().cast<IntegerAttr>().getValue().getSExtValue();
+      return constantOp.getValue()
+          .cast<IntegerAttr>()
+          .getValue()
+          .getSExtValue();
 
     return llvm::None;
   }

diff  --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
index 73c7a1e2702dc..7142e968d51da 100644
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
@@ -421,7 +421,7 @@ static LogicalResult processParallelLoop(
       return val;
     if (auto constOp = val.getDefiningOp<arith::ConstantOp>())
       return rewriter.create<arith::ConstantOp>(constOp.getLoc(),
-                                                constOp.value());
+                                                constOp.getValue());
     return {};
   };
 

diff  --git a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
index 28334364eda18..abcef4887e495 100644
--- a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
+++ b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
@@ -92,17 +92,17 @@ matchSelectReduction(Block &block, ArrayRef<Predicate> lessThanPredicates,
 
   // Detect whether the comparison is less-than or greater-than, otherwise bail.
   bool isLess;
-  if (llvm::find(lessThanPredicates, compare.predicate()) !=
+  if (llvm::find(lessThanPredicates, compare.getPredicate()) !=
       lessThanPredicates.end()) {
     isLess = true;
-  } else if (llvm::find(greaterThanPredicates, compare.predicate()) !=
+  } else if (llvm::find(greaterThanPredicates, compare.getPredicate()) !=
              greaterThanPredicates.end()) {
     isLess = false;
   } else {
     return false;
   }
 
-  if (select.condition() != compare.getResult())
+  if (select.getCondition() != compare.getResult())
     return false;
 
   // Detect if the operands are swapped between cmpf and select. Match the
@@ -112,10 +112,10 @@ matchSelectReduction(Block &block, ArrayRef<Predicate> lessThanPredicates,
   // positions.
   constexpr unsigned kTrueValue = 1;
   constexpr unsigned kFalseValue = 2;
-  bool sameOperands = select.getOperand(kTrueValue) == compare.lhs() &&
-                      select.getOperand(kFalseValue) == compare.rhs();
-  bool swappedOperands = select.getOperand(kTrueValue) == compare.rhs() &&
-                         select.getOperand(kFalseValue) == compare.lhs();
+  bool sameOperands = select.getOperand(kTrueValue) == compare.getLhs() &&
+                      select.getOperand(kFalseValue) == compare.getRhs();
+  bool swappedOperands = select.getOperand(kTrueValue) == compare.getRhs() &&
+                         select.getOperand(kFalseValue) == compare.getLhs();
   if (!sameOperands && !swappedOperands)
     return false;
 

diff  --git a/mlir/lib/Conversion/ShapeToStandard/ConvertShapeConstraints.cpp b/mlir/lib/Conversion/ShapeToStandard/ConvertShapeConstraints.cpp
index d5388dfd4040c..ccf6fd38100e5 100644
--- a/mlir/lib/Conversion/ShapeToStandard/ConvertShapeConstraints.cpp
+++ b/mlir/lib/Conversion/ShapeToStandard/ConvertShapeConstraints.cpp
@@ -29,7 +29,7 @@ class ConvertCstrRequireOp : public OpRewritePattern<shape::CstrRequireOp> {
   using OpRewritePattern::OpRewritePattern;
   LogicalResult matchAndRewrite(shape::CstrRequireOp op,
                                 PatternRewriter &rewriter) const override {
-    rewriter.create<AssertOp>(op.getLoc(), op.pred(), op.msgAttr());
+    rewriter.create<AssertOp>(op.getLoc(), op.getPred(), op.getMsgAttr());
     rewriter.replaceOpWithNewOp<shape::ConstWitnessOp>(op, true);
     return success();
   }

diff  --git a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
index 8116357c38e12..e1e24faa4d2a6 100644
--- a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
+++ b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
@@ -40,7 +40,7 @@ AnyOpConversion::matchAndRewrite(AnyOp op, OpAdaptor adaptor,
                                  ConversionPatternRewriter &rewriter) const {
   // Replace `any` with its first operand.
   // Any operand would be a valid substitution.
-  rewriter.replaceOp(op, {adaptor.inputs().front()});
+  rewriter.replaceOp(op, {adaptor.getInputs().front()});
   return success();
 }
 
@@ -57,7 +57,8 @@ class BinaryOpConversion : public OpConversionPattern<SrcOpTy> {
     if (op.getType().template isa<SizeType>())
       return failure();
 
-    rewriter.replaceOpWithNewOp<DstOpTy>(op, adaptor.lhs(), adaptor.rhs());
+    rewriter.replaceOpWithNewOp<DstOpTy>(op, adaptor.getLhs(),
+                                         adaptor.getRhs());
     return success();
   }
 };
@@ -134,7 +135,7 @@ LogicalResult BroadcastOpConverter::matchAndRewrite(
   // representing the shape extents, the rank is the extent of the only
   // dimension in the tensor.
   SmallVector<Value> ranks, rankDiffs;
-  llvm::append_range(ranks, llvm::map_range(adaptor.shapes(), [&](Value v) {
+  llvm::append_range(ranks, llvm::map_range(adaptor.getShapes(), [&](Value v) {
                        return lb.create<tensor::DimOp>(v, zero);
                      }));
 
@@ -154,8 +155,9 @@ LogicalResult BroadcastOpConverter::matchAndRewrite(
   Value replacement = lb.create<tensor::GenerateOp>(
       getExtentTensorType(lb.getContext()), ValueRange{maxRank},
       [&](OpBuilder &b, Location loc, ValueRange args) {
-        Value broadcastedDim = getBroadcastedDim(
-            ImplicitLocOpBuilder(loc, b), adaptor.shapes(), rankDiffs, args[0]);
+        Value broadcastedDim =
+            getBroadcastedDim(ImplicitLocOpBuilder(loc, b), adaptor.getShapes(),
+                              rankDiffs, args[0]);
 
         b.create<tensor::YieldOp>(loc, broadcastedDim);
       });
@@ -187,14 +189,14 @@ LogicalResult ConstShapeOpConverter::matchAndRewrite(
 
   auto loc = op.getLoc();
   SmallVector<Value, 4> extentOperands;
-  for (auto extent : op.shape()) {
+  for (auto extent : op.getShape()) {
     extentOperands.push_back(
         rewriter.create<arith::ConstantIndexOp>(loc, extent.getLimitedValue()));
   }
   Type indexTy = rewriter.getIndexType();
   Value tensor =
       rewriter.create<tensor::FromElementsOp>(loc, indexTy, extentOperands);
-  Type resultTy = RankedTensorType::get({op.shape().size()}, indexTy);
+  Type resultTy = RankedTensorType::get({op.getShape().size()}, indexTy);
   rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultTy, tensor);
   return success();
 }
@@ -214,7 +216,7 @@ LogicalResult ConstSizeOpConversion::matchAndRewrite(
     ConstSizeOp op, OpAdaptor adaptor,
     ConversionPatternRewriter &rewriter) const {
   rewriter.replaceOpWithNewOp<arith::ConstantIndexOp>(
-      op, op.value().getSExtValue());
+      op, op.getValue().getSExtValue());
   return success();
 }
 
@@ -234,7 +236,7 @@ LogicalResult IsBroadcastableOpConverter::matchAndRewrite(
     ConversionPatternRewriter &rewriter) const {
   // For now, this lowering is only defined on `tensor<?xindex>` operands, not
   // on shapes.
-  if (!llvm::all_of(op.shapes(),
+  if (!llvm::all_of(op.getShapes(),
                     [](Value v) { return !v.getType().isa<ShapeType>(); }))
     return failure();
 
@@ -248,7 +250,7 @@ LogicalResult IsBroadcastableOpConverter::matchAndRewrite(
   // representing the shape extents, the rank is the extent of the only
   // dimension in the tensor.
   SmallVector<Value> ranks, rankDiffs;
-  llvm::append_range(ranks, llvm::map_range(adaptor.shapes(), [&](Value v) {
+  llvm::append_range(ranks, llvm::map_range(adaptor.getShapes(), [&](Value v) {
                        return lb.create<tensor::DimOp>(v, zero);
                      }));
 
@@ -276,10 +278,10 @@ LogicalResult IsBroadcastableOpConverter::matchAndRewrite(
         // could reuse the Broadcast lowering entirely, but we redo the work
         // here to make optimizations easier between the two loops.
         Value broadcastedDim = getBroadcastedDim(
-            ImplicitLocOpBuilder(loc, b), adaptor.shapes(), rankDiffs, iv);
+            ImplicitLocOpBuilder(loc, b), adaptor.getShapes(), rankDiffs, iv);
 
         Value broadcastable = iterArgs[0];
-        for (auto tup : llvm::zip(adaptor.shapes(), rankDiffs)) {
+        for (auto tup : llvm::zip(adaptor.getShapes(), rankDiffs)) {
           Value shape, rankDiff;
           std::tie(shape, rankDiff) = tup;
           Value outOfBounds = b.create<arith::CmpIOp>(
@@ -339,16 +341,17 @@ LogicalResult GetExtentOpConverter::matchAndRewrite(
 
   // Derive shape extent directly from shape origin if possible. This
   // circumvents the necessity to materialize the shape in memory.
-  if (auto shapeOfOp = op.shape().getDefiningOp<ShapeOfOp>()) {
-    if (shapeOfOp.arg().getType().isa<ShapedType>()) {
-      rewriter.replaceOpWithNewOp<tensor::DimOp>(op, shapeOfOp.arg(),
-                                                 adaptor.dim());
+  if (auto shapeOfOp = op.getShape().getDefiningOp<ShapeOfOp>()) {
+    if (shapeOfOp.getArg().getType().isa<ShapedType>()) {
+      rewriter.replaceOpWithNewOp<tensor::DimOp>(op, shapeOfOp.getArg(),
+                                                 adaptor.getDim());
       return success();
     }
   }
 
-  rewriter.replaceOpWithNewOp<tensor::ExtractOp>(
-      op, rewriter.getIndexType(), adaptor.shape(), ValueRange{adaptor.dim()});
+  rewriter.replaceOpWithNewOp<tensor::ExtractOp>(op, rewriter.getIndexType(),
+                                                 adaptor.getShape(),
+                                                 ValueRange{adaptor.getDim()});
   return success();
 }
 
@@ -370,7 +373,7 @@ RankOpConverter::matchAndRewrite(shape::RankOp op, OpAdaptor adaptor,
   if (op.getType().isa<SizeType>())
     return failure();
 
-  rewriter.replaceOpWithNewOp<tensor::DimOp>(op, adaptor.shape(), 0);
+  rewriter.replaceOpWithNewOp<tensor::DimOp>(op, adaptor.getShape(), 0);
   return success();
 }
 
@@ -390,7 +393,7 @@ LogicalResult
 ReduceOpConverter::matchAndRewrite(shape::ReduceOp op, OpAdaptor adaptor,
                                    ConversionPatternRewriter &rewriter) const {
   // For now, this lowering is only defined on `tensor<?xindex>` operands.
-  if (op.shape().getType().isa<ShapeType>())
+  if (op.getShape().getType().isa<ShapeType>())
     return failure();
 
   auto loc = op.getLoc();
@@ -399,12 +402,12 @@ ReduceOpConverter::matchAndRewrite(shape::ReduceOp op, OpAdaptor adaptor,
   Value one = rewriter.create<arith::ConstantIndexOp>(loc, 1);
   Type indexTy = rewriter.getIndexType();
   Value rank =
-      rewriter.create<tensor::DimOp>(loc, indexTy, adaptor.shape(), zero);
+      rewriter.create<tensor::DimOp>(loc, indexTy, adaptor.getShape(), zero);
 
   auto loop = rewriter.create<scf::ForOp>(
-      loc, zero, rank, one, op.initVals(),
+      loc, zero, rank, one, op.getInitVals(),
       [&](OpBuilder &b, Location loc, Value iv, ValueRange args) {
-        Value extent = b.create<tensor::ExtractOp>(loc, adaptor.shape(), iv);
+        Value extent = b.create<tensor::ExtractOp>(loc, adaptor.getShape(), iv);
 
         SmallVector<Value, 2> mappedValues{iv, extent};
         mappedValues.append(args.begin(), args.end());
@@ -468,12 +471,12 @@ struct ShapeEqOpConverter : public OpConversionPattern<ShapeEqOp> {
 LogicalResult
 ShapeEqOpConverter::matchAndRewrite(ShapeEqOp op, OpAdaptor adaptor,
                                     ConversionPatternRewriter &rewriter) const {
-  if (!llvm::all_of(op.shapes(),
+  if (!llvm::all_of(op.getShapes(),
                     [](Value v) { return !v.getType().isa<ShapeType>(); }))
     return failure();
 
   Type i1Ty = rewriter.getI1Type();
-  if (op.shapes().size() <= 1) {
+  if (op.getShapes().size() <= 1) {
     rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, i1Ty,
                                                    rewriter.getBoolAttr(true));
     return success();
@@ -482,12 +485,12 @@ ShapeEqOpConverter::matchAndRewrite(ShapeEqOp op, OpAdaptor adaptor,
   auto loc = op.getLoc();
   Type indexTy = rewriter.getIndexType();
   Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
-  Value firstShape = adaptor.shapes().front();
+  Value firstShape = adaptor.getShapes().front();
   Value firstRank =
       rewriter.create<tensor::DimOp>(loc, indexTy, firstShape, zero);
   Value result = nullptr;
   // Generate a linear sequence of compares, all with firstShape as lhs.
-  for (Value shape : adaptor.shapes().drop_front(1)) {
+  for (Value shape : adaptor.getShapes().drop_front(1)) {
     Value rank = rewriter.create<tensor::DimOp>(loc, indexTy, shape, zero);
     Value eqRank = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
                                                   firstRank, rank);
@@ -545,7 +548,7 @@ LogicalResult ShapeOfOpConversion::matchAndRewrite(
 
   // For ranked tensor arguments, lower to `tensor.from_elements`.
   auto loc = op.getLoc();
-  Value tensor = adaptor.arg();
+  Value tensor = adaptor.getArg();
   Type tensorTy = tensor.getType();
   if (tensorTy.isa<RankedTensorType>()) {
 
@@ -602,16 +605,16 @@ LogicalResult SplitAtOpConversion::matchAndRewrite(
     ConversionPatternRewriter &rewriter) const {
   // Error conditions are not implemented, only lower if all operands and
   // results are extent tensors.
-  if (llvm::any_of(ValueRange{op.operand(), op.head(), op.tail()},
+  if (llvm::any_of(ValueRange{op.getOperand(), op.getHead(), op.getTail()},
                    [](Value v) { return v.getType().isa<ShapeType>(); }))
     return failure();
 
   ImplicitLocOpBuilder b(op.getLoc(), rewriter);
   Value zero = b.create<arith::ConstantIndexOp>(0);
-  Value rank = b.create<tensor::DimOp>(adaptor.operand(), zero);
+  Value rank = b.create<tensor::DimOp>(adaptor.getOperand(), zero);
 
   // index < 0 ? index + rank : index
-  Value originalIndex = adaptor.index();
+  Value originalIndex = adaptor.getIndex();
   Value add = b.create<arith::AddIOp>(originalIndex, rank);
   Value indexIsNegative =
       b.create<arith::CmpIOp>(arith::CmpIPredicate::slt, originalIndex, zero);
@@ -619,10 +622,10 @@ LogicalResult SplitAtOpConversion::matchAndRewrite(
 
   Value one = b.create<arith::ConstantIndexOp>(1);
   Value head =
-      b.create<tensor::ExtractSliceOp>(adaptor.operand(), zero, index, one);
+      b.create<tensor::ExtractSliceOp>(adaptor.getOperand(), zero, index, one);
   Value tailSize = b.create<arith::SubIOp>(rank, index);
-  Value tail =
-      b.create<tensor::ExtractSliceOp>(adaptor.operand(), index, tailSize, one);
+  Value tail = b.create<tensor::ExtractSliceOp>(adaptor.getOperand(), index,
+                                                tailSize, one);
   rewriter.replaceOp(op, {head, tail});
   return success();
 }
@@ -636,11 +639,11 @@ class ToExtentTensorOpConversion
   LogicalResult
   matchAndRewrite(ToExtentTensorOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    if (!adaptor.input().getType().isa<RankedTensorType>())
+    if (!adaptor.getInput().getType().isa<RankedTensorType>())
       return rewriter.notifyMatchFailure(op, "input needs to be a tensor");
 
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(),
-                                                adaptor.input());
+                                                adaptor.getInput());
     return success();
   }
 };

diff  --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
index 8ec9828ce3c72..0aad8177b0d4d 100644
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -427,7 +427,7 @@ struct AssertOpLowering : public ConvertOpToLLVMPattern<AssertOp> {
     // Generate assertion test.
     rewriter.setInsertionPointToEnd(opBlock);
     rewriter.replaceOpWithNewOp<LLVM::CondBrOp>(
-        op, adaptor.arg(), continuationBlock, failureBlock);
+        op, adaptor.getArg(), continuationBlock, failureBlock);
 
     return success();
   }
@@ -573,9 +573,9 @@ struct RankOpLowering : public ConvertOpToLLVMPattern<RankOp> {
   matchAndRewrite(RankOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     Location loc = op.getLoc();
-    Type operandType = op.memrefOrTensor().getType();
+    Type operandType = op.getMemrefOrTensor().getType();
     if (auto unrankedMemRefType = operandType.dyn_cast<UnrankedMemRefType>()) {
-      UnrankedMemRefDescriptor desc(adaptor.memrefOrTensor());
+      UnrankedMemRefDescriptor desc(adaptor.getMemrefOrTensor());
       rewriter.replaceOp(op, {desc.rank(rewriter, loc)});
       return success();
     }
@@ -722,7 +722,7 @@ struct SplatOpLowering : public ConvertOpToLLVMPattern<SplatOp> {
         rewriter.getZeroAttr(rewriter.getIntegerType(32)));
 
     auto v = rewriter.create<LLVM::InsertElementOp>(
-        splatOp.getLoc(), vectorType, undef, adaptor.input(), zero);
+        splatOp.getLoc(), vectorType, undef, adaptor.getInput(), zero);
 
     int64_t width = splatOp.getType().cast<VectorType>().getDimSize(0);
     SmallVector<int32_t, 4> zeroValues(width, 0);
@@ -767,7 +767,7 @@ struct SplatNdOpLowering : public ConvertOpToLLVMPattern<SplatOp> {
         loc, typeConverter->convertType(rewriter.getIntegerType(32)),
         rewriter.getZeroAttr(rewriter.getIntegerType(32)));
     Value v = rewriter.create<LLVM::InsertElementOp>(loc, llvm1DVectorTy, vdesc,
-                                                     adaptor.input(), zero);
+                                                     adaptor.getInput(), zero);
 
     // Shuffle the value across the desired number of elements.
     int64_t width = resultType.getDimSize(resultType.getRank() - 1);
@@ -791,7 +791,7 @@ struct SplatNdOpLowering : public ConvertOpToLLVMPattern<SplatOp> {
 /// Try to match the kind of a std.atomic_rmw to determine whether to use a
 /// lowering to llvm.atomicrmw or fallback to llvm.cmpxchg.
 static Optional<LLVM::AtomicBinOp> matchSimpleAtomicOp(AtomicRMWOp atomicOp) {
-  switch (atomicOp.kind()) {
+  switch (atomicOp.getKind()) {
   case AtomicRMWKind::addf:
     return LLVM::AtomicBinOp::fadd;
   case AtomicRMWKind::addi:
@@ -825,13 +825,13 @@ struct AtomicRMWOpLowering : public LoadStoreOpLowering<AtomicRMWOp> {
     auto maybeKind = matchSimpleAtomicOp(atomicOp);
     if (!maybeKind)
       return failure();
-    auto resultType = adaptor.value().getType();
+    auto resultType = adaptor.getValue().getType();
     auto memRefType = atomicOp.getMemRefType();
     auto dataPtr =
-        getStridedElementPtr(atomicOp.getLoc(), memRefType, adaptor.memref(),
-                             adaptor.indices(), rewriter);
+        getStridedElementPtr(atomicOp.getLoc(), memRefType, adaptor.getMemref(),
+                             adaptor.getIndices(), rewriter);
     rewriter.replaceOpWithNewOp<LLVM::AtomicRMWOp>(
-        atomicOp, resultType, *maybeKind, dataPtr, adaptor.value(),
+        atomicOp, resultType, *maybeKind, dataPtr, adaptor.getValue(),
         LLVM::AtomicOrdering::acq_rel);
     return success();
   }
@@ -889,9 +889,9 @@ struct GenericAtomicRMWOpLowering
 
     // Compute the loaded value and branch to the loop block.
     rewriter.setInsertionPointToEnd(initBlock);
-    auto memRefType = atomicOp.memref().getType().cast<MemRefType>();
-    auto dataPtr = getStridedElementPtr(loc, memRefType, adaptor.memref(),
-                                        adaptor.indices(), rewriter);
+    auto memRefType = atomicOp.getMemref().getType().cast<MemRefType>();
+    auto dataPtr = getStridedElementPtr(loc, memRefType, adaptor.getMemref(),
+                                        adaptor.getIndices(), rewriter);
     Value init = rewriter.create<LLVM::LoadOp>(loc, dataPtr);
     rewriter.create<LLVM::BrOp>(loc, init, loopBlock);
 

diff  --git a/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp b/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp
index 320f045fcb246..48b6e99257bb5 100644
--- a/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp
+++ b/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp
@@ -154,8 +154,9 @@ ReturnOpPattern::matchAndRewrite(ReturnOp returnOp, OpAdaptor adaptor,
 LogicalResult
 SelectOpPattern::matchAndRewrite(SelectOp op, OpAdaptor adaptor,
                                  ConversionPatternRewriter &rewriter) const {
-  rewriter.replaceOpWithNewOp<spirv::SelectOp>(
-      op, adaptor.condition(), adaptor.true_value(), adaptor.false_value());
+  rewriter.replaceOpWithNewOp<spirv::SelectOp>(op, adaptor.getCondition(),
+                                               adaptor.getTrueValue(),
+                                               adaptor.getFalseValue());
   return success();
 }
 
@@ -169,7 +170,7 @@ SplatPattern::matchAndRewrite(SplatOp op, OpAdaptor adaptor,
   auto dstVecType = op.getType().dyn_cast<VectorType>();
   if (!dstVecType || !spirv::CompositeType::isValid(dstVecType))
     return failure();
-  SmallVector<Value, 4> source(dstVecType.getNumElements(), adaptor.input());
+  SmallVector<Value, 4> source(dstVecType.getNumElements(), adaptor.getInput());
   rewriter.replaceOpWithNewOp<spirv::CompositeConstructOp>(op, dstVecType,
                                                            source);
   return success();

diff  --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
index 29cf0db7ca46e..8e037ecf5c852 100644
--- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
+++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -121,7 +121,7 @@ static bool constantSupportsMMAMatrixType(arith::ConstantOp constantOp) {
   auto vecType = constantOp.getType().dyn_cast<VectorType>();
   if (!vecType || vecType.getRank() != 2)
     return false;
-  return constantOp.value().isa<SplatElementsAttr>();
+  return constantOp.getValue().isa<SplatElementsAttr>();
 }
 
 /// Return true if this is a broadcast from scalar to a 2D vector.
@@ -329,7 +329,7 @@ static void convertConstantOp(arith::ConstantOp op,
                               llvm::DenseMap<Value, Value> &valueMapping) {
   assert(constantSupportsMMAMatrixType(op));
   OpBuilder b(op);
-  Attribute splat = op.value().cast<SplatElementsAttr>().getSplatValue();
+  Attribute splat = op.getValue().cast<SplatElementsAttr>().getSplatValue();
   auto scalarConstant =
       b.create<arith::ConstantOp>(op.getLoc(), splat.getType(), splat);
   const char *fragType = inferFragType(op);

diff  --git a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
index e304c9d23e3ea..827556dca293b 100644
--- a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
@@ -949,7 +949,7 @@ static arith::ConstantOp vectorizeConstant(arith::ConstantOp constOp,
     return nullptr;
 
   auto vecTy = getVectorType(scalarTy, state.strategy);
-  auto vecAttr = DenseElementsAttr::get(vecTy, constOp.value());
+  auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
 
   OpBuilder::InsertionGuard guard(state.builder);
   Operation *parentOp = state.builder.getInsertionBlock()->getParentOp();
@@ -1253,7 +1253,7 @@ static bool isNeutralElementConst(AtomicRMWKind reductionKind, Value value,
   Attribute valueAttr = getIdentityValueAttr(reductionKind, scalarTy,
                                              state.builder, value.getLoc());
   if (auto constOp = dyn_cast_or_null<arith::ConstantOp>(value.getDefiningOp()))
-    return constOp.value() == valueAttr;
+    return constOp.getValue() == valueAttr;
   return false;
 }
 

diff  --git a/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp b/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
index f73df26cd1f90..09c742bc82176 100644
--- a/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
+++ b/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
@@ -82,7 +82,7 @@ namespace {
 void arith::ConstantOp::getAsmResultNames(
     function_ref<void(Value, StringRef)> setNameFn) {
   auto type = getType();
-  if (auto intCst = value().dyn_cast<IntegerAttr>()) {
+  if (auto intCst = getValue().dyn_cast<IntegerAttr>()) {
     auto intType = type.dyn_cast<IntegerType>();
 
     // Sugar i1 constants with 'true' and 'false'.
@@ -106,15 +106,15 @@ void arith::ConstantOp::getAsmResultNames(
 static LogicalResult verify(arith::ConstantOp op) {
   auto type = op.getType();
   // The value's type must match the return type.
-  if (op.value().getType() != type) {
-    return op.emitOpError() << "value type " << op.value().getType()
+  if (op.getValue().getType() != type) {
+    return op.emitOpError() << "value type " << op.getValue().getType()
                             << " must match return type: " << type;
   }
   // Integer values must be signless.
   if (type.isa<IntegerType>() && !type.cast<IntegerType>().isSignless())
     return op.emitOpError("integer return type must be signless");
   // Any float or elements attribute are acceptable.
-  if (!op.value().isa<IntegerAttr, FloatAttr, ElementsAttr>()) {
+  if (!op.getValue().isa<IntegerAttr, FloatAttr, ElementsAttr>()) {
     return op.emitOpError(
         "value must be an integer, float, or elements attribute");
   }
@@ -133,7 +133,7 @@ bool arith::ConstantOp::isBuildableWith(Attribute value, Type type) {
 }
 
 OpFoldResult arith::ConstantOp::fold(ArrayRef<Attribute> operands) {
-  return value();
+  return getValue();
 }
 
 void arith::ConstantIntOp::build(OpBuilder &builder, OperationState &result,
@@ -187,8 +187,8 @@ bool arith::ConstantIndexOp::classof(Operation *op) {
 
 OpFoldResult arith::AddIOp::fold(ArrayRef<Attribute> operands) {
   // addi(x, 0) -> x
-  if (matchPattern(rhs(), m_Zero()))
-    return lhs();
+  if (matchPattern(getRhs(), m_Zero()))
+    return getLhs();
 
   return constFoldBinaryOp<IntegerAttr>(operands,
                                         [](APInt a, APInt b) { return a + b; });
@@ -209,8 +209,8 @@ OpFoldResult arith::SubIOp::fold(ArrayRef<Attribute> operands) {
   if (getOperand(0) == getOperand(1))
     return Builder(getContext()).getZeroAttr(getType());
   // subi(x,0) -> x
-  if (matchPattern(rhs(), m_Zero()))
-    return lhs();
+  if (matchPattern(getRhs(), m_Zero()))
+    return getLhs();
 
   return constFoldBinaryOp<IntegerAttr>(operands,
                                         [](APInt a, APInt b) { return a - b; });
@@ -229,10 +229,10 @@ void arith::SubIOp::getCanonicalizationPatterns(
 
 OpFoldResult arith::MulIOp::fold(ArrayRef<Attribute> operands) {
   // muli(x, 0) -> 0
-  if (matchPattern(rhs(), m_Zero()))
-    return rhs();
+  if (matchPattern(getRhs(), m_Zero()))
+    return getRhs();
   // muli(x, 1) -> x
-  if (matchPattern(rhs(), m_One()))
+  if (matchPattern(getRhs(), m_One()))
     return getOperand(0);
   // TODO: Handle the overflow case.
 
@@ -259,10 +259,10 @@ OpFoldResult arith::DivUIOp::fold(ArrayRef<Attribute> operands) {
   // Fold out division by one. Assumes all tensors of all ones are splats.
   if (auto rhs = operands[1].dyn_cast_or_null<IntegerAttr>()) {
     if (rhs.getValue() == 1)
-      return lhs();
+      return getLhs();
   } else if (auto rhs = operands[1].dyn_cast_or_null<SplatElementsAttr>()) {
     if (rhs.getSplatValue<IntegerAttr>().getValue() == 1)
-      return lhs();
+      return getLhs();
   }
 
   return div0 ? Attribute() : result;
@@ -286,10 +286,10 @@ OpFoldResult arith::DivSIOp::fold(ArrayRef<Attribute> operands) {
   // Fold out division by one. Assumes all tensors of all ones are splats.
   if (auto rhs = operands[1].dyn_cast_or_null<IntegerAttr>()) {
     if (rhs.getValue() == 1)
-      return lhs();
+      return getLhs();
   } else if (auto rhs = operands[1].dyn_cast_or_null<SplatElementsAttr>()) {
     if (rhs.getSplatValue<IntegerAttr>().getValue() == 1)
-      return lhs();
+      return getLhs();
   }
 
   return overflowOrDiv0 ? Attribute() : result;
@@ -346,10 +346,10 @@ OpFoldResult arith::CeilDivSIOp::fold(ArrayRef<Attribute> operands) {
   // splats.
   if (auto rhs = operands[1].dyn_cast_or_null<IntegerAttr>()) {
     if (rhs.getValue() == 1)
-      return lhs();
+      return getLhs();
   } else if (auto rhs = operands[1].dyn_cast_or_null<SplatElementsAttr>()) {
     if (rhs.getSplatValue<IntegerAttr>().getValue() == 1)
-      return lhs();
+      return getLhs();
   }
 
   return overflowOrDiv0 ? Attribute() : result;
@@ -395,10 +395,10 @@ OpFoldResult arith::FloorDivSIOp::fold(ArrayRef<Attribute> operands) {
   // splats.
   if (auto rhs = operands[1].dyn_cast_or_null<IntegerAttr>()) {
     if (rhs.getValue() == 1)
-      return lhs();
+      return getLhs();
   } else if (auto rhs = operands[1].dyn_cast_or_null<SplatElementsAttr>()) {
     if (rhs.getSplatValue<IntegerAttr>().getValue() == 1)
-      return lhs();
+      return getLhs();
   }
 
   return overflowOrDiv0 ? Attribute() : result;
@@ -458,15 +458,15 @@ OpFoldResult arith::RemSIOp::fold(ArrayRef<Attribute> operands) {
 
 OpFoldResult arith::AndIOp::fold(ArrayRef<Attribute> operands) {
   /// and(x, 0) -> 0
-  if (matchPattern(rhs(), m_Zero()))
-    return rhs();
+  if (matchPattern(getRhs(), m_Zero()))
+    return getRhs();
   /// and(x, allOnes) -> x
   APInt intValue;
-  if (matchPattern(rhs(), m_ConstantInt(&intValue)) && intValue.isAllOnes())
-    return lhs();
+  if (matchPattern(getRhs(), m_ConstantInt(&intValue)) && intValue.isAllOnes())
+    return getLhs();
   /// and(x, x) -> x
-  if (lhs() == rhs())
-    return rhs();
+  if (getLhs() == getRhs())
+    return getRhs();
 
   return constFoldBinaryOp<IntegerAttr>(operands,
                                         [](APInt a, APInt b) { return a & b; });
@@ -478,11 +478,11 @@ OpFoldResult arith::AndIOp::fold(ArrayRef<Attribute> operands) {
 
 OpFoldResult arith::OrIOp::fold(ArrayRef<Attribute> operands) {
   /// or(x, 0) -> x
-  if (matchPattern(rhs(), m_Zero()))
-    return lhs();
+  if (matchPattern(getRhs(), m_Zero()))
+    return getLhs();
   /// or(x, x) -> x
-  if (lhs() == rhs())
-    return rhs();
+  if (getLhs() == getRhs())
+    return getRhs();
   /// or(x, <all ones>) -> <all ones>
   if (auto rhsAttr = operands[1].dyn_cast_or_null<IntegerAttr>())
     if (rhsAttr.getValue().isAllOnes())
@@ -498,10 +498,10 @@ OpFoldResult arith::OrIOp::fold(ArrayRef<Attribute> operands) {
 
 OpFoldResult arith::XOrIOp::fold(ArrayRef<Attribute> operands) {
   /// xor(x, 0) -> x
-  if (matchPattern(rhs(), m_Zero()))
-    return lhs();
+  if (matchPattern(getRhs(), m_Zero()))
+    return getLhs();
   /// xor(x, x) -> 0
-  if (lhs() == rhs())
+  if (getLhs() == getRhs())
     return Builder(getContext()).getZeroAttr(getType());
 
   return constFoldBinaryOp<IntegerAttr>(operands,
@@ -599,7 +599,7 @@ static bool areValidCastInputsAndOutputs(TypeRange inputs, TypeRange outputs) {
 // Extend ops can only extend to a wider type.
 template <typename ValType, typename Op>
 static LogicalResult verifyExtOp(Op op) {
-  Type srcType = getElementTypeOrSelf(op.in().getType());
+  Type srcType = getElementTypeOrSelf(op.getIn().getType());
   Type dstType = getElementTypeOrSelf(op.getType());
 
   if (srcType.cast<ValType>().getWidth() >= dstType.cast<ValType>().getWidth())
@@ -612,7 +612,7 @@ static LogicalResult verifyExtOp(Op op) {
 // Truncate ops can only truncate to a shorter type.
 template <typename ValType, typename Op>
 static LogicalResult verifyTruncateOp(Op op) {
-  Type srcType = getElementTypeOrSelf(op.in().getType());
+  Type srcType = getElementTypeOrSelf(op.getIn().getType());
   Type dstType = getElementTypeOrSelf(op.getType());
 
   if (srcType.cast<ValType>().getWidth() <= dstType.cast<ValType>().getWidth())
@@ -935,7 +935,7 @@ OpFoldResult arith::CmpIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "cmpi takes two operands");
 
   // cmpi(pred, x, x)
-  if (lhs() == rhs()) {
+  if (getLhs() == getRhs()) {
     auto val = applyCmpPredicateToEqualOperands(getPredicate());
     return BoolAttr::get(getContext(), val);
   }
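
A usage sketch, not part of this diff: with the arith dialect flipped to the
prefixed form, downstream code matches operands through getLhs()/getRhs()
exactly as the fold hooks above do. The pattern below is illustrative only;
its name and any registration are hypothetical.

  // Illustration only: and(x, x) -> x written as a standalone rewrite
  // pattern against the prefixed accessors used in the folds above.
  #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
  #include "mlir/IR/PatternMatch.h"

  using namespace mlir;

  namespace {
  struct FoldSelfAnd : public OpRewritePattern<arith::AndIOp> {
    using OpRewritePattern<arith::AndIOp>::OpRewritePattern;

    LogicalResult matchAndRewrite(arith::AndIOp op,
                                  PatternRewriter &rewriter) const override {
      if (op.getLhs() != op.getRhs())
        return failure();
      // Replace all uses of the result with the (identical) lhs operand.
      rewriter.replaceOp(op, op.getLhs());
      return success();
    }
  };
  } // namespace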

diff --git a/mlir/lib/Dialect/Arithmetic/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Arithmetic/Transforms/Bufferize.cpp
index a6a72d9436975..37d2b88837082 100644
--- a/mlir/lib/Dialect/Arithmetic/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Arithmetic/Transforms/Bufferize.cpp
@@ -24,7 +24,7 @@ struct BufferizeIndexCastOp : public OpConversionPattern<arith::IndexCastOp> {
                   ConversionPatternRewriter &rewriter) const override {
     auto tensorType = op.getType().cast<RankedTensorType>();
     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(
-        op, adaptor.in(),
+        op, adaptor.getIn(),
         MemRefType::get(tensorType.getShape(), tensorType.getElementType()));
     return success();
   }

diff --git a/mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp
index af6de99ce5ce9..8cfd1c91838b3 100644
--- a/mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp
+++ b/mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp
@@ -23,8 +23,8 @@ struct CeilDivSIOpConverter : public OpRewritePattern<arith::CeilDivSIOp> {
     Location loc = op.getLoc();
     auto signedCeilDivIOp = cast<arith::CeilDivSIOp>(op);
     Type type = signedCeilDivIOp.getType();
-    Value a = signedCeilDivIOp.lhs();
-    Value b = signedCeilDivIOp.rhs();
+    Value a = signedCeilDivIOp.getLhs();
+    Value b = signedCeilDivIOp.getRhs();
     Value plusOne = rewriter.create<arith::ConstantOp>(
         loc, rewriter.getIntegerAttr(type, 1));
     Value zero = rewriter.create<arith::ConstantOp>(
@@ -79,8 +79,8 @@ struct FloorDivSIOpConverter : public OpRewritePattern<arith::FloorDivSIOp> {
     Location loc = op.getLoc();
     arith::FloorDivSIOp signedFloorDivIOp = cast<arith::FloorDivSIOp>(op);
     Type type = signedFloorDivIOp.getType();
-    Value a = signedFloorDivIOp.lhs();
-    Value b = signedFloorDivIOp.rhs();
+    Value a = signedFloorDivIOp.getLhs();
+    Value b = signedFloorDivIOp.getRhs();
     Value plusOne = rewriter.create<arith::ConstantOp>(
         loc, rewriter.getIntegerAttr(type, 1));
     Value zero = rewriter.create<arith::ConstantOp>(

diff --git a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
index 0cd375f8c69cd..d7016c88d967c 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
@@ -578,7 +578,7 @@ class AssertOpLowering : public OpConversionPattern<AssertOp> {
 
     Block *cont = rewriter.splitBlock(op->getBlock(), Block::iterator(op));
     rewriter.setInsertionPointToEnd(cont->getPrevNode());
-    rewriter.create<CondBranchOp>(loc, adaptor.arg(),
+    rewriter.create<CondBranchOp>(loc, adaptor.getArg(),
                                   /*trueDest=*/cont,
                                   /*trueArgs=*/ArrayRef<Value>(),
                                   /*falseDest=*/setupSetErrorBlock(coro),

diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 4abf1eff86147..16a4eb65f6d2c 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -74,17 +74,17 @@ static void printLLVMOpAttrs(OpAsmPrinter &printer, Operation *op,
 // Printing/parsing for LLVM::CmpOp.
 //===----------------------------------------------------------------------===//
 static void printICmpOp(OpAsmPrinter &p, ICmpOp &op) {
-  p << " \"" << stringifyICmpPredicate(op.predicate()) << "\" "
+  p << " \"" << stringifyICmpPredicate(op.getPredicate()) << "\" "
     << op.getOperand(0) << ", " << op.getOperand(1);
   p.printOptionalAttrDict(op->getAttrs(), {"predicate"});
-  p << " : " << op.lhs().getType();
+  p << " : " << op.getLhs().getType();
 }
 
 static void printFCmpOp(OpAsmPrinter &p, FCmpOp &op) {
-  p << " \"" << stringifyFCmpPredicate(op.predicate()) << "\" "
+  p << " \"" << stringifyFCmpPredicate(op.getPredicate()) << "\" "
     << op.getOperand(0) << ", " << op.getOperand(1);
   p.printOptionalAttrDict(processFMFAttr(op->getAttrs()), {"predicate"});
-  p << " : " << op.lhs().getType();
+  p << " : " << op.getLhs().getType();
 }
 
 // <operation> ::= `llvm.icmp` string-literal ssa-use `,` ssa-use
@@ -159,11 +159,11 @@ static ParseResult parseCmpOp(OpAsmParser &parser, OperationState &result) {
 static void printAllocaOp(OpAsmPrinter &p, AllocaOp &op) {
   auto elemTy = op.getType().cast<LLVM::LLVMPointerType>().getElementType();
 
-  auto funcTy = FunctionType::get(op.getContext(), {op.arraySize().getType()},
-                                  {op.getType()});
+  auto funcTy = FunctionType::get(
+      op.getContext(), {op.getArraySize().getType()}, {op.getType()});
 
-  p << ' ' << op.arraySize() << " x " << elemTy;
-  if (op.alignment().hasValue() && *op.alignment() != 0)
+  p << ' ' << op.getArraySize() << " x " << elemTy;
+  if (op.getAlignment().hasValue() && *op.getAlignment() != 0)
     p.printOptionalAttrDict(op->getAttrs());
   else
     p.printOptionalAttrDict(op->getAttrs(), {"alignment"});
@@ -215,7 +215,7 @@ static ParseResult parseAllocaOp(OpAsmParser &parser, OperationState &result) {
 Optional<MutableOperandRange>
 BrOp::getMutableSuccessorOperands(unsigned index) {
   assert(index == 0 && "invalid successor index");
-  return destOperandsMutable();
+  return getDestOperandsMutable();
 }
 
 //===----------------------------------------------------------------------===//
@@ -225,7 +225,8 @@ BrOp::getMutableSuccessorOperands(unsigned index) {
 Optional<MutableOperandRange>
 CondBrOp::getMutableSuccessorOperands(unsigned index) {
   assert(index < getNumSuccessors() && "invalid successor index");
-  return index == 0 ? trueDestOperandsMutable() : falseDestOperandsMutable();
+  return index == 0 ? getTrueDestOperandsMutable()
+                    : getFalseDestOperandsMutable();
 }
 
 //===----------------------------------------------------------------------===//
@@ -691,7 +692,7 @@ static LogicalResult verify(LandingpadOp op) {
       // catch - global addresses only.
       // Bitcast ops should have global addresses as their args.
       if (auto bcOp = value.getDefiningOp<BitcastOp>()) {
-        if (auto addrOp = bcOp.arg().getDefiningOp<AddressOfOp>())
+        if (auto addrOp = bcOp.getArg().getDefiningOp<AddressOfOp>())
           continue;
         return op.emitError("constant clauses expected")
                    .attachNote(bcOp.getLoc())
@@ -771,7 +772,7 @@ static LogicalResult verify(CallOp &op) {
   bool isIndirect = false;
 
   // If this is an indirect call, the callee attribute is missing.
-  FlatSymbolRefAttr calleeName = op.calleeAttr();
+  FlatSymbolRefAttr calleeName = op.getCalleeAttr();
   if (!calleeName) {
     isIndirect = true;
     if (!op.getNumOperands())
@@ -845,7 +846,7 @@ static LogicalResult verify(CallOp &op) {
 }
 
 static void printCallOp(OpAsmPrinter &p, CallOp &op) {
-  auto callee = op.callee();
+  auto callee = op.getCallee();
   bool isDirect = callee.hasValue();
 
   // Print the direct callee if present as a function attribute, or an indirect
@@ -962,10 +963,10 @@ void LLVM::ExtractElementOp::build(OpBuilder &b, OperationState &result,
 }
 
 static void printExtractElementOp(OpAsmPrinter &p, ExtractElementOp &op) {
-  p << ' ' << op.vector() << "[" << op.position() << " : "
-    << op.position().getType() << "]";
+  p << ' ' << op.getVector() << "[" << op.getPosition() << " : "
+    << op.getPosition().getType() << "]";
   p.printOptionalAttrDict(op->getAttrs());
-  p << " : " << op.vector().getType();
+  p << " : " << op.getVector().getType();
 }
 
 // <operation> ::= `llvm.extractelement` ssa-use `, ` ssa-use
@@ -991,16 +992,16 @@ static ParseResult parseExtractElementOp(OpAsmParser &parser,
 }
 
 static LogicalResult verify(ExtractElementOp op) {
-  Type vectorType = op.vector().getType();
+  Type vectorType = op.getVector().getType();
   if (!LLVM::isCompatibleVectorType(vectorType))
     return op->emitOpError("expected LLVM dialect-compatible vector type for "
                            "operand #1, got")
            << vectorType;
   Type valueType = LLVM::getVectorElementType(vectorType);
-  if (valueType != op.res().getType())
+  if (valueType != op.getRes().getType())
     return op.emitOpError() << "Type mismatch: extracting from " << vectorType
                             << " should produce " << valueType
-                            << " but this op returns " << op.res().getType();
+                            << " but this op returns " << op.getRes().getType();
   return success();
 }
 
@@ -1009,9 +1010,9 @@ static LogicalResult verify(ExtractElementOp op) {
 //===----------------------------------------------------------------------===//
 
 static void printExtractValueOp(OpAsmPrinter &p, ExtractValueOp &op) {
-  p << ' ' << op.container() << op.position();
+  p << ' ' << op.getContainer() << op.getPosition();
   p.printOptionalAttrDict(op->getAttrs(), {"position"});
-  p << " : " << op.container().getType();
+  p << " : " << op.getContainer().getType();
 }
 
 // Extract the type at `position` in the wrapped LLVM IR aggregate type
@@ -1133,9 +1134,9 @@ static ParseResult parseExtractValueOp(OpAsmParser &parser,
 }
 
 OpFoldResult LLVM::ExtractValueOp::fold(ArrayRef<Attribute> operands) {
-  auto insertValueOp = container().getDefiningOp<InsertValueOp>();
+  auto insertValueOp = getContainer().getDefiningOp<InsertValueOp>();
   while (insertValueOp) {
-    if (position() == insertValueOp.position())
+    if (getPosition() == insertValueOp.position())
       return insertValueOp.value();
     insertValueOp = insertValueOp.container().getDefiningOp<InsertValueOp>();
   }
@@ -1143,16 +1144,16 @@ OpFoldResult LLVM::ExtractValueOp::fold(ArrayRef<Attribute> operands) {
 }
 
 static LogicalResult verify(ExtractValueOp op) {
-  Type valueType = getInsertExtractValueElementType(op.container().getType(),
-                                                    op.positionAttr(), op);
+  Type valueType = getInsertExtractValueElementType(op.getContainer().getType(),
+                                                    op.getPositionAttr(), op);
   if (!valueType)
     return failure();
 
-  if (op.res().getType() != valueType)
+  if (op.getRes().getType() != valueType)
     return op.emitOpError()
-           << "Type mismatch: extracting from " << op.container().getType()
+           << "Type mismatch: extracting from " << op.getContainer().getType()
            << " should produce " << valueType << " but this op returns "
-           << op.res().getType();
+           << op.getRes().getType();
   return success();
 }
 
@@ -1339,12 +1340,12 @@ static OpTy lookupSymbolInModule(Operation *parent, StringRef name) {
 
 GlobalOp AddressOfOp::getGlobal() {
   return lookupSymbolInModule<LLVM::GlobalOp>((*this)->getParentOp(),
-                                              global_name());
+                                              getGlobalName());
 }
 
 LLVMFuncOp AddressOfOp::getFunction() {
   return lookupSymbolInModule<LLVM::LLVMFuncOp>((*this)->getParentOp(),
-                                                global_name());
+                                                getGlobalName());
 }
 
 static LogicalResult verify(AddressOfOp op) {
@@ -1355,7 +1356,7 @@ static LogicalResult verify(AddressOfOp op) {
         "must reference a global defined by 'llvm.mlir.global' or 'llvm.func'");
 
   if (global &&
-      LLVM::LLVMPointerType::get(global.getType(), global.addr_space()) !=
+      LLVM::LLVMPointerType::get(global.getType(), global.getAddrSpace()) !=
           op.getResult().getType())
     return op.emitOpError(
         "the type must be a pointer to the type of the referenced global");
@@ -1400,7 +1401,7 @@ void GlobalOp::build(OpBuilder &builder, OperationState &result, Type type,
   if (alignment != 0)
     result.addAttribute("alignment", builder.getI64IntegerAttr(alignment));
 
-  result.addAttribute(getLinkageAttrName(),
+  result.addAttribute(::getLinkageAttrName(),
                       LinkageAttr::get(builder.getContext(), linkage));
   if (addrSpace != 0)
     result.addAttribute("addr_space", builder.getI32IntegerAttr(addrSpace));
@@ -1409,15 +1410,15 @@ void GlobalOp::build(OpBuilder &builder, OperationState &result, Type type,
 }
 
 static void printGlobalOp(OpAsmPrinter &p, GlobalOp op) {
-  p << ' ' << stringifyLinkage(op.linkage()) << ' ';
-  if (auto unnamedAddr = op.unnamed_addr()) {
+  p << ' ' << stringifyLinkage(op.getLinkage()) << ' ';
+  if (auto unnamedAddr = op.getUnnamedAddr()) {
     StringRef str = stringifyUnnamedAddr(*unnamedAddr);
     if (!str.empty())
       p << str << ' ';
   }
-  if (op.constant())
+  if (op.getConstant())
     p << "constant ";
-  p.printSymbolName(op.sym_name());
+  p.printSymbolName(op.getSymName());
   p << '(';
   if (auto value = op.getValueOrNull())
     p.printAttribute(value);
@@ -1433,7 +1434,7 @@ static void printGlobalOp(OpAsmPrinter &p, GlobalOp op) {
   // Print the trailing type unless it's a string global.
   if (op.getValueOrNull().dyn_cast_or_null<StringAttr>())
     return;
-  p << " : " << op.global_type();
+  p << " : " << op.getType();
 
   Region &initializer = op.getInitializerRegion();
   if (!initializer.empty())
@@ -1595,7 +1596,7 @@ static LogicalResult verify(GlobalOp op) {
       return op.emitOpError("cannot have both initializer value and region");
   }
 
-  if (op.linkage() == Linkage::Common) {
+  if (op.getLinkage() == Linkage::Common) {
     if (Attribute value = op.getValueOrNull()) {
       if (!isZeroAttribute(value)) {
         return op.emitOpError()
@@ -1605,7 +1606,7 @@ static LogicalResult verify(GlobalOp op) {
     }
   }
 
-  if (op.linkage() == Linkage::Appending) {
+  if (op.getLinkage() == Linkage::Appending) {
     if (!op.getType().isa<LLVMArrayType>()) {
       return op.emitOpError()
              << "expected array type for '"
@@ -1613,7 +1614,7 @@ static LogicalResult verify(GlobalOp op) {
     }
   }
 
-  Optional<uint64_t> alignAttr = op.alignment();
+  Optional<uint64_t> alignAttr = op.getAlignment();
   if (alignAttr.hasValue()) {
     uint64_t value = alignAttr.getValue();
     if (!llvm::isPowerOf2_64(value))
@@ -1697,7 +1698,7 @@ void LLVMFuncOp::build(OpBuilder &builder, OperationState &result,
   result.addAttribute(SymbolTable::getSymbolAttrName(),
                       builder.getStringAttr(name));
   result.addAttribute("type", TypeAttr::get(type));
-  result.addAttribute(getLinkageAttrName(),
+  result.addAttribute(::getLinkageAttrName(),
                       LinkageAttr::get(builder.getContext(), linkage));
   result.attributes.append(attrs.begin(), attrs.end());
   if (dsoLocal)
@@ -1903,7 +1904,7 @@ static LogicalResult verify(LLVMFuncOp op) {
 //===----------------------------------------------------------------------===//
 
 static LogicalResult verify(LLVM::ConstantOp op) {
-  if (StringAttr sAttr = op.value().dyn_cast<StringAttr>()) {
+  if (StringAttr sAttr = op.getValue().dyn_cast<StringAttr>()) {
     auto arrayType = op.getType().dyn_cast<LLVMArrayType>();
     if (!arrayType || arrayType.getNumElements() != sAttr.getValue().size() ||
         !arrayType.getElementType().isInteger(8)) {
@@ -1920,7 +1921,7 @@ static LogicalResult verify(LLVM::ConstantOp op) {
                                "same type, the type of a complex constant";
     }
 
-    auto arrayAttr = op.value().dyn_cast<ArrayAttr>();
+    auto arrayAttr = op.getValue().dyn_cast<ArrayAttr>();
     if (!arrayAttr || arrayAttr.size() != 2 ||
         arrayAttr[0].getType() != arrayAttr[1].getType()) {
       return op.emitOpError() << "expected array attribute with two elements, "
@@ -1936,7 +1937,7 @@ static LogicalResult verify(LLVM::ConstantOp op) {
     }
     return success();
   }
-  if (!op.value().isa<IntegerAttr, ArrayAttr, FloatAttr, ElementsAttr>())
+  if (!op.getValue().isa<IntegerAttr, ArrayAttr, FloatAttr, ElementsAttr>())
     return op.emitOpError()
            << "only supports integer, float, string or elements attributes";
   return success();
@@ -2004,10 +2005,10 @@ static ParseResult parseAtomicOrdering(OpAsmParser &parser,
 //===----------------------------------------------------------------------===//
 
 static void printAtomicRMWOp(OpAsmPrinter &p, AtomicRMWOp &op) {
-  p << ' ' << stringifyAtomicBinOp(op.bin_op()) << ' ' << op.ptr() << ", "
-    << op.val() << ' ' << stringifyAtomicOrdering(op.ordering()) << ' ';
+  p << ' ' << stringifyAtomicBinOp(op.getBinOp()) << ' ' << op.getPtr() << ", "
+    << op.getVal() << ' ' << stringifyAtomicOrdering(op.getOrdering()) << ' ';
   p.printOptionalAttrDict(op->getAttrs(), {"bin_op", "ordering"});
-  p << " : " << op.res().getType();
+  p << " : " << op.getRes().getType();
 }
 
 // <operation> ::= `llvm.atomicrmw` keyword ssa-use `,` ssa-use keyword
@@ -2031,19 +2032,20 @@ static ParseResult parseAtomicRMWOp(OpAsmParser &parser,
 }
 
 static LogicalResult verify(AtomicRMWOp op) {
-  auto ptrType = op.ptr().getType().cast<LLVM::LLVMPointerType>();
-  auto valType = op.val().getType();
+  auto ptrType = op.getPtr().getType().cast<LLVM::LLVMPointerType>();
+  auto valType = op.getVal().getType();
   if (valType != ptrType.getElementType())
     return op.emitOpError("expected LLVM IR element type for operand #0 to "
                           "match type for operand #1");
-  auto resType = op.res().getType();
+  auto resType = op.getRes().getType();
   if (resType != valType)
     return op.emitOpError(
         "expected LLVM IR result type to match type for operand #1");
-  if (op.bin_op() == AtomicBinOp::fadd || op.bin_op() == AtomicBinOp::fsub) {
+  if (op.getBinOp() == AtomicBinOp::fadd ||
+      op.getBinOp() == AtomicBinOp::fsub) {
     if (!mlir::LLVM::isCompatibleFloatingPointType(valType))
       return op.emitOpError("expected LLVM IR floating point type");
-  } else if (op.bin_op() == AtomicBinOp::xchg) {
+  } else if (op.getBinOp() == AtomicBinOp::xchg) {
     auto intType = valType.dyn_cast<IntegerType>();
     unsigned intBitWidth = intType ? intType.getWidth() : 0;
     if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
@@ -2059,7 +2061,7 @@ static LogicalResult verify(AtomicRMWOp op) {
       return op.emitOpError("expected LLVM IR integer type");
   }
 
-  if (static_cast<unsigned>(op.ordering()) <
+  if (static_cast<unsigned>(op.getOrdering()) <
       static_cast<unsigned>(AtomicOrdering::monotonic))
     return op.emitOpError()
            << "expected at least '"
@@ -2074,12 +2076,12 @@ static LogicalResult verify(AtomicRMWOp op) {
 //===----------------------------------------------------------------------===//
 
 static void printAtomicCmpXchgOp(OpAsmPrinter &p, AtomicCmpXchgOp &op) {
-  p << ' ' << op.ptr() << ", " << op.cmp() << ", " << op.val() << ' '
-    << stringifyAtomicOrdering(op.success_ordering()) << ' '
-    << stringifyAtomicOrdering(op.failure_ordering());
+  p << ' ' << op.getPtr() << ", " << op.getCmp() << ", " << op.getVal() << ' '
+    << stringifyAtomicOrdering(op.getSuccessOrdering()) << ' '
+    << stringifyAtomicOrdering(op.getFailureOrdering());
   p.printOptionalAttrDict(op->getAttrs(),
                           {"success_ordering", "failure_ordering"});
-  p << " : " << op.val().getType();
+  p << " : " << op.getVal().getType();
 }
 
 // <operation> ::= `llvm.cmpxchg` ssa-use `,` ssa-use `,` ssa-use
@@ -2111,11 +2113,11 @@ static ParseResult parseAtomicCmpXchgOp(OpAsmParser &parser,
 }
 
 static LogicalResult verify(AtomicCmpXchgOp op) {
-  auto ptrType = op.ptr().getType().cast<LLVM::LLVMPointerType>();
+  auto ptrType = op.getPtr().getType().cast<LLVM::LLVMPointerType>();
   if (!ptrType)
     return op.emitOpError("expected LLVM IR pointer type for operand #0");
-  auto cmpType = op.cmp().getType();
-  auto valType = op.val().getType();
+  auto cmpType = op.getCmp().getType();
+  auto valType = op.getVal().getType();
   if (cmpType != ptrType.getElementType() || cmpType != valType)
     return op.emitOpError("expected LLVM IR element type for operand #0 to "
                           "match type for all other operands");
@@ -2126,11 +2128,11 @@ static LogicalResult verify(AtomicCmpXchgOp op) {
       !valType.isa<BFloat16Type>() && !valType.isa<Float16Type>() &&
       !valType.isa<Float32Type>() && !valType.isa<Float64Type>())
     return op.emitOpError("unexpected LLVM IR type");
-  if (op.success_ordering() < AtomicOrdering::monotonic ||
-      op.failure_ordering() < AtomicOrdering::monotonic)
+  if (op.getSuccessOrdering() < AtomicOrdering::monotonic ||
+      op.getFailureOrdering() < AtomicOrdering::monotonic)
     return op.emitOpError("ordering must be at least 'monotonic'");
-  if (op.failure_ordering() == AtomicOrdering::release ||
-      op.failure_ordering() == AtomicOrdering::acq_rel)
+  if (op.getFailureOrdering() == AtomicOrdering::release ||
+      op.getFailureOrdering() == AtomicOrdering::acq_rel)
     return op.emitOpError("failure ordering cannot be 'release' or 'acq_rel'");
   return success();
 }
@@ -2164,13 +2166,13 @@ static void printFenceOp(OpAsmPrinter &p, FenceOp &op) {
   p << ' ';
   if (!op->getAttr(syncscopeKeyword).cast<StringAttr>().getValue().empty())
     p << "syncscope(" << op->getAttr(syncscopeKeyword) << ") ";
-  p << stringifyAtomicOrdering(op.ordering());
+  p << stringifyAtomicOrdering(op.getOrdering());
 }
 
 static LogicalResult verify(FenceOp &op) {
-  if (op.ordering() == AtomicOrdering::not_atomic ||
-      op.ordering() == AtomicOrdering::unordered ||
-      op.ordering() == AtomicOrdering::monotonic)
+  if (op.getOrdering() == AtomicOrdering::not_atomic ||
+      op.getOrdering() == AtomicOrdering::unordered ||
+      op.getOrdering() == AtomicOrdering::monotonic)
     return op.emitOpError("can be given only acquire, release, acq_rel, "
                           "and seq_cst orderings");
   return success();
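
A reading-side sketch, not from this commit: the LLVM dialect hunks above
rename the snake_case accessors (linkage(), sym_name(), constant(),
alignment(), ...) to the prefixed camel-case form. The helper below only
illustrates the new spellings; the function itself is invented.

  #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
  #include "mlir/IR/BuiltinOps.h"
  #include "llvm/Support/raw_ostream.h"

  // Print every llvm.mlir.global in a module via the prefixed accessors.
  static void dumpGlobals(mlir::ModuleOp module) {
    module.walk([](mlir::LLVM::GlobalOp global) {
      llvm::outs() << global.getSymName() << ": "
                   << mlir::LLVM::stringifyLinkage(global.getLinkage());
      if (global.getConstant())
        llvm::outs() << " constant";
      auto align = global.getAlignment();
      if (align.hasValue())
        llvm::outs() << " align " << align.getValue();
      llvm::outs() << "\n";
    });
  }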

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
index f465ecb40a3a5..00b74a1b269c5 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
@@ -61,7 +61,7 @@ static bool isEqualOffsetSizeOrStride(OpFoldResult op1, OpFoldResult op2) {
     Attribute attr = ofr.dyn_cast<Attribute>();
     // Note: isa+cast-like pattern allows writing the condition below as 1 line.
     if (!attr && ofr.get<Value>().getDefiningOp<arith::ConstantOp>())
-      attr = ofr.get<Value>().getDefiningOp<arith::ConstantOp>().value();
+      attr = ofr.get<Value>().getDefiningOp<arith::ConstantOp>().getValue();
     if (auto intAttr = attr.dyn_cast_or_null<IntegerAttr>())
       return intAttr.getValue().getSExtValue();
     return llvm::None;

diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index 7c93c51bcae1d..26d581cb1b5c6 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -187,7 +187,7 @@ IntegerAttr getSmallestBoundingIndex(Value size) {
                             : cst.getValue();
   } else if (auto constIndexOp = size.getDefiningOp<arith::ConstantOp>()) {
     if (constIndexOp.getType().isa<IndexType>())
-      boundingConst = constIndexOp.value().cast<IntegerAttr>().getInt();
+      boundingConst = constIndexOp.getValue().cast<IntegerAttr>().getInt();
   } else if (auto affineApplyOp = size.getDefiningOp<AffineApplyOp>()) {
     if (auto cExpr = affineApplyOp.getAffineMap()
                          .getResult(0)
@@ -196,7 +196,7 @@ IntegerAttr getSmallestBoundingIndex(Value size) {
   } else if (auto dimOp = size.getDefiningOp<tensor::DimOp>()) {
     auto shape = dimOp.source().getType().dyn_cast<ShapedType>();
     if (auto constOp = dimOp.index().getDefiningOp<arith::ConstantOp>()) {
-      if (auto indexAttr = constOp.value().dyn_cast<IntegerAttr>()) {
+      if (auto indexAttr = constOp.getValue().dyn_cast<IntegerAttr>()) {
         auto dimIndex = indexAttr.getInt();
         if (!shape.isDynamicDim(dimIndex)) {
           boundingConst = shape.getShape()[dimIndex];

diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index e792d6581566d..0837bd2f888ec 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -691,7 +691,7 @@ void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
 
 Optional<int64_t> DimOp::getConstantIndex() {
   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
-    return constantOp.value().cast<IntegerAttr>().getInt();
+    return constantOp.getValue().cast<IntegerAttr>().getInt();
   return {};
 }
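
The same call-site idiom recurs through the Linalg, MemRef, OpenACC, SCF and
SparseTensor hunks above and below: fetch the defining arith.constant and read
its attribute through the prefixed getValue(). Condensed into one helper as a
sketch only; the helper name and signature are hypothetical.

  #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
  #include "mlir/IR/BuiltinAttributes.h"
  #include "mlir/IR/Value.h"
  #include "llvm/ADT/Optional.h"

  // Hypothetical helper: returns the integer held by a defining
  // arith.constant, read through the prefixed getValue() accessor.
  static llvm::Optional<int64_t> getConstantInt(mlir::Value value) {
    if (auto constOp = value.getDefiningOp<mlir::arith::ConstantOp>())
      if (auto intAttr = constOp.getValue().dyn_cast<mlir::IntegerAttr>())
        return intAttr.getInt();
    return llvm::None;
  }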
 

diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
index db9c859a58761..6bf4d7eb364c2 100644
--- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
+++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
@@ -173,7 +173,7 @@ struct RemoveConstantIfCondition : public OpRewritePattern<OpTy> {
       return success();
 
     auto constOp = op.ifCond().template getDefiningOp<arith::ConstantOp>();
-    if (constOp && constOp.value().template cast<IntegerAttr>().getInt())
+    if (constOp && constOp.getValue().template cast<IntegerAttr>().getInt())
       rewriter.updateRootInPlace(op, [&]() { op.ifCondMutable().erase(0); });
     else if (constOp)
       rewriter.eraseOp(op);

diff --git a/mlir/lib/Dialect/SCF/SCF.cpp b/mlir/lib/Dialect/SCF/SCF.cpp
index cc68ff622cdc5..57ad2f412852f 100644
--- a/mlir/lib/Dialect/SCF/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/SCF.cpp
@@ -714,8 +714,8 @@ struct SimplifyTrivialLoops : public OpRewritePattern<ForOp> {
       return failure();
 
     // If the loop is known to have 0 iterations, remove it.
-    llvm::APInt lbValue = lb.value().cast<IntegerAttr>().getValue();
-    llvm::APInt ubValue = ub.value().cast<IntegerAttr>().getValue();
+    llvm::APInt lbValue = lb.getValue().cast<IntegerAttr>().getValue();
+    llvm::APInt ubValue = ub.getValue().cast<IntegerAttr>().getValue();
     if (lbValue.sge(ubValue)) {
       rewriter.replaceOp(op, op.getIterOperands());
       return success();
@@ -727,7 +727,7 @@ struct SimplifyTrivialLoops : public OpRewritePattern<ForOp> {
 
     // If the loop is known to have 1 iteration, inline its body and remove the
     // loop.
-    llvm::APInt stepValue = step.value().cast<IntegerAttr>().getValue();
+    llvm::APInt stepValue = step.getValue().cast<IntegerAttr>().getValue();
     if ((lbValue + stepValue).sge(ubValue)) {
       SmallVector<Value, 4> blockArgs;
       blockArgs.reserve(op.getNumIterOperands() + 1);
@@ -1241,7 +1241,7 @@ struct RemoveStaticCondition : public OpRewritePattern<IfOp> {
     if (!constant)
       return failure();
 
-    if (constant.value().cast<BoolAttr>().getValue())
+    if (constant.getValue().cast<BoolAttr>().getValue())
       replaceOpWithRegion(rewriter, op, op.thenRegion());
     else if (!op.elseRegion().empty())
       replaceOpWithRegion(rewriter, op, op.elseRegion());
@@ -1425,8 +1425,8 @@ struct ReplaceIfYieldWithConditionOrValue : public OpRewritePattern<IfOp> {
       if (!falseYield)
         continue;
 
-      bool trueVal = trueYield.value().cast<BoolAttr>().getValue();
-      bool falseVal = falseYield.value().cast<BoolAttr>().getValue();
+      bool trueVal = trueYield.getValue().cast<BoolAttr>().getValue();
+      bool falseVal = falseYield.getValue().cast<BoolAttr>().getValue();
       if (!trueVal && falseVal) {
         if (!opResult.use_empty()) {
           Value notCond = rewriter.create<arith::XOrIOp>(

diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index f21fafead0b49..635d3c05a72e3 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -45,17 +45,17 @@ bool shape::isExtentTensorType(Type type) {
 LogicalResult shape::getShapeVec(Value input,
                                  SmallVectorImpl<int64_t> &shapeValues) {
   if (auto inputOp = input.getDefiningOp<ShapeOfOp>()) {
-    auto type = inputOp.arg().getType().dyn_cast<ShapedType>();
+    auto type = inputOp.getArg().getType().dyn_cast<ShapedType>();
     if (!type.hasRank())
       return failure();
     shapeValues = llvm::to_vector<6>(type.getShape());
     return success();
   } else if (auto inputOp = input.getDefiningOp<ConstShapeOp>()) {
-    shapeValues = llvm::to_vector<6>(inputOp.shape().getValues<int64_t>());
+    shapeValues = llvm::to_vector<6>(inputOp.getShape().getValues<int64_t>());
     return success();
   } else if (auto inputOp = input.getDefiningOp<arith::ConstantOp>()) {
     shapeValues = llvm::to_vector<6>(
-        inputOp.value().cast<DenseIntElementsAttr>().getValues<int64_t>());
+        inputOp.getValue().cast<DenseIntElementsAttr>().getValues<int64_t>());
     return success();
   } else {
     return failure();
@@ -218,7 +218,7 @@ LogicalResult ShapeDialect::verifyOperationAttribute(Operation *op,
         if (!shapeFnLib)
           return op->emitError()
                  << it << " does not refer to FunctionLibraryOp";
-        for (auto mapping : shapeFnLib.mapping()) {
+        for (auto mapping : shapeFnLib.getMapping()) {
           if (!key.insert(mapping.first).second) {
             return op->emitError("only one op to shape mapping allowed, found "
                                  "multiple for `")
@@ -281,13 +281,13 @@ static ParseResult parseAssumingOp(OpAsmParser &parser,
 }
 
 static void print(OpAsmPrinter &p, AssumingOp op) {
-  bool yieldsResults = !op.results().empty();
+  bool yieldsResults = !op.getResults().empty();
 
-  p << " " << op.witness();
+  p << " " << op.getWitness();
   if (yieldsResults) {
     p << " -> (" << op.getResultTypes() << ")";
   }
-  p.printRegion(op.doRegion(),
+  p.printRegion(op.getDoRegion(),
                 /*printEntryBlockArgs=*/false,
                 /*printBlockTerminators=*/yieldsResults);
   p.printOptionalAttrDict(op->getAttrs());
@@ -300,8 +300,8 @@ struct AssumingWithTrue : public OpRewritePattern<AssumingOp> {
 
   LogicalResult matchAndRewrite(AssumingOp op,
                                 PatternRewriter &rewriter) const override {
-    auto witness = op.witness().getDefiningOp<ConstWitnessOp>();
-    if (!witness || !witness.passingAttr())
+    auto witness = op.getWitness().getDefiningOp<ConstWitnessOp>();
+    if (!witness || !witness.getPassingAttr())
       return failure();
 
     AssumingOp::inlineRegionIntoParent(op, rewriter);
@@ -320,7 +320,7 @@ struct AssumingOpRemoveUnusedResults : public OpRewritePattern<AssumingOp> {
     // Find used values.
     SmallVector<Value, 4> newYieldOperands;
     Value opResult, yieldOperand;
-    for (auto it : llvm::zip(op.getResults(), yieldOp.operands())) {
+    for (auto it : llvm::zip(op.getResults(), yieldOp.getOperands())) {
       std::tie(opResult, yieldOperand) = it;
       if (!opResult.getUses().empty()) {
         newYieldOperands.push_back(yieldOperand);
@@ -338,8 +338,8 @@ struct AssumingOpRemoveUnusedResults : public OpRewritePattern<AssumingOp> {
         rewriter.replaceOpWithNewOp<AssumingYieldOp>(yieldOp, newYieldOperands);
     rewriter.setInsertionPoint(op);
     auto newOp = rewriter.create<AssumingOp>(
-        op.getLoc(), newYieldOp->getOperandTypes(), op.witness());
-    newOp.doRegion().takeBody(op.doRegion());
+        op.getLoc(), newYieldOp->getOperandTypes(), op.getWitness());
+    newOp.getDoRegion().takeBody(op.getDoRegion());
 
     // Use the new results to replace the previously used ones.
     SmallVector<Value, 4> replacementValues;
@@ -373,7 +373,7 @@ void AssumingOp::getSuccessorRegions(
     return;
   }
 
-  regions.push_back(RegionSuccessor(&doRegion()));
+  regions.push_back(RegionSuccessor(&getDoRegion()));
 }
 
 void AssumingOp::inlineRegionIntoParent(AssumingOp &op,
@@ -386,7 +386,7 @@ void AssumingOp::inlineRegionIntoParent(AssumingOp &op,
 
   // Remove the AssumingOp and AssumingYieldOp.
   auto &yieldOp = assumingBlock->back();
-  rewriter.inlineRegionBefore(op.doRegion(), blockAfterAssuming);
+  rewriter.inlineRegionBefore(op.getDoRegion(), blockAfterAssuming);
   rewriter.replaceOp(op, yieldOp.getOperands());
   rewriter.eraseOp(&yieldOp);
 
@@ -440,8 +440,8 @@ bool mlir::shape::AddOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
 
 OpFoldResult mlir::shape::AddOp::fold(ArrayRef<Attribute> operands) {
   // add(x, 0) -> x
-  if (matchPattern(rhs(), m_Zero()))
-    return lhs();
+  if (matchPattern(getRhs(), m_Zero()))
+    return getLhs();
 
   return constFoldBinaryOp<IntegerAttr>(operands,
                                         [](APInt a, APInt b) { return a + b; });
@@ -459,16 +459,16 @@ struct AssumingAllToCstrEqCanonicalization
   LogicalResult matchAndRewrite(AssumingAllOp op,
                                 PatternRewriter &rewriter) const override {
     SmallVector<Value, 8> shapes;
-    for (Value w : op.inputs()) {
+    for (Value w : op.getInputs()) {
       auto cstrEqOp = w.getDefiningOp<CstrEqOp>();
       if (!cstrEqOp)
         return failure();
-      bool disjointShapes = llvm::none_of(cstrEqOp.shapes(), [&](Value s) {
+      bool disjointShapes = llvm::none_of(cstrEqOp.getShapes(), [&](Value s) {
         return llvm::is_contained(shapes, s);
       });
-      if (!shapes.empty() && !cstrEqOp.shapes().empty() && disjointShapes)
+      if (!shapes.empty() && !cstrEqOp.getShapes().empty() && disjointShapes)
         return failure();
-      shapes.append(cstrEqOp.shapes().begin(), cstrEqOp.shapes().end());
+      shapes.append(cstrEqOp.getShapes().begin(), cstrEqOp.getShapes().end());
     }
     rewriter.replaceOpWithNewOp<CstrEqOp>(op, shapes);
     return success();
@@ -545,15 +545,15 @@ void AssumingAllOp::build(OpBuilder &b, OperationState &state,
 //===----------------------------------------------------------------------===//
 
 OpFoldResult BroadcastOp::fold(ArrayRef<Attribute> operands) {
-  if (shapes().size() == 1) {
+  if (getShapes().size() == 1) {
     // Otherwise, we need a cast which would be a canonicalization, not folding.
-    if (shapes().front().getType() != getType())
+    if (getShapes().front().getType() != getType())
       return nullptr;
-    return shapes().front();
+    return getShapes().front();
   }
 
   // TODO: Support folding with more than 2 input shapes
-  if (shapes().size() > 2)
+  if (getShapes().size() > 2)
     return nullptr;
 
   if (!operands[0] || !operands[1])
@@ -590,7 +590,7 @@ struct RemoveEmptyShapeOperandsPattern : public OpRewritePattern<OpTy> {
           return false;
       }
       if (auto constShape = shape.getDefiningOp<ConstShapeOp>()) {
-        if (constShape.shape().empty())
+        if (constShape.getShape().empty())
           return false;
       }
       return true;
@@ -617,7 +617,7 @@ struct BroadcastForwardSingleOperandPattern
                                 PatternRewriter &rewriter) const override {
     if (op.getNumOperands() != 1)
       return failure();
-    Value replacement = op.shapes().front();
+    Value replacement = op.getShapes().front();
 
     // Insert cast if needed.
     if (replacement.getType() != op.getType()) {
@@ -646,12 +646,12 @@ struct BroadcastFoldConstantOperandsPattern
                                 PatternRewriter &rewriter) const override {
     SmallVector<int64_t, 8> foldedConstantShape;
     SmallVector<Value, 8> newShapeOperands;
-    for (Value shape : op.shapes()) {
+    for (Value shape : op.getShapes()) {
       if (auto constShape = shape.getDefiningOp<ConstShapeOp>()) {
         SmallVector<int64_t, 8> newFoldedConstantShape;
         if (OpTrait::util::getBroadcastedShape(
                 foldedConstantShape,
-                llvm::to_vector<8>(constShape.shape().getValues<int64_t>()),
+                llvm::to_vector<8>(constShape.getShape().getValues<int64_t>()),
                 newFoldedConstantShape)) {
           foldedConstantShape = newFoldedConstantShape;
           continue;
@@ -721,7 +721,7 @@ struct BroadcastConcretizeResultTypePattern
 
     // Infer resulting shape rank if possible.
     int64_t maxRank = 0;
-    for (Value shape : op.shapes()) {
+    for (Value shape : op.getShapes()) {
       if (auto extentTensorTy = shape.getType().dyn_cast<RankedTensorType>()) {
         // Cannot infer resulting shape rank if any operand is dynamically
         // ranked.
@@ -732,7 +732,8 @@ struct BroadcastConcretizeResultTypePattern
     }
 
     auto newOp = rewriter.create<BroadcastOp>(
-        op.getLoc(), getExtentTensorType(getContext(), maxRank), op.shapes());
+        op.getLoc(), getExtentTensorType(getContext(), maxRank),
+        op.getShapes());
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
     return success();
   }
@@ -775,7 +776,7 @@ static void print(OpAsmPrinter &p, ConstShapeOp &op) {
   p << " ";
   p.printOptionalAttrDict(op->getAttrs(), /*elidedAttrs=*/{"shape"});
   p << "[";
-  interleaveComma(op.shape().getValues<int64_t>(), p,
+  interleaveComma(op.getShape().getValues<int64_t>(), p,
                   [&](int64_t i) { p << i; });
   p << "] : ";
   p.printType(op.getType());
@@ -811,7 +812,7 @@ static ParseResult parseConstShapeOp(OpAsmParser &parser,
   return success();
 }
 
-OpFoldResult ConstShapeOp::fold(ArrayRef<Attribute>) { return shapeAttr(); }
+OpFoldResult ConstShapeOp::fold(ArrayRef<Attribute>) { return getShapeAttr(); }
 
 void ConstShapeOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
                                                MLIRContext *context) {
@@ -895,7 +896,7 @@ OpFoldResult CstrBroadcastableOp::fold(ArrayRef<Attribute> operands) {
   // on the input shapes.
   if ([&] {
         SmallVector<SmallVector<int64_t, 6>, 6> extents;
-        for (auto shapeValue : shapes()) {
+        for (auto shapeValue : getShapes()) {
           extents.emplace_back();
           if (failed(getShapeVec(shapeValue, extents.back())))
             return false;
@@ -946,13 +947,13 @@ void ConstSizeOp::build(OpBuilder &builder, OperationState &result,
   build(builder, result, builder.getIndexAttr(value));
 }
 
-OpFoldResult ConstSizeOp::fold(ArrayRef<Attribute>) { return valueAttr(); }
+OpFoldResult ConstSizeOp::fold(ArrayRef<Attribute>) { return getValueAttr(); }
 
 void ConstSizeOp::getAsmResultNames(
     llvm::function_ref<void(Value, StringRef)> setNameFn) {
   SmallString<4> buffer;
   llvm::raw_svector_ostream os(buffer);
-  os << "c" << value();
+  os << "c" << getValue();
   setNameFn(getResult(), os.str());
 }
 
@@ -960,7 +961,9 @@ void ConstSizeOp::getAsmResultNames(
 // ConstWitnessOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult ConstWitnessOp::fold(ArrayRef<Attribute>) { return passingAttr(); }
+OpFoldResult ConstWitnessOp::fold(ArrayRef<Attribute>) {
+  return getPassingAttr();
+}
 
 //===----------------------------------------------------------------------===//
 // CstrRequireOp
@@ -1069,7 +1072,7 @@ void FunctionLibraryOp::build(OpBuilder &builder, OperationState &result,
 }
 
 FuncOp FunctionLibraryOp::getShapeFunction(Operation *op) {
-  auto attr = mapping()
+  auto attr = getMapping()
                   .get(op->getName().getIdentifier())
                   .dyn_cast_or_null<FlatSymbolRefAttr>();
   if (!attr)
@@ -1111,7 +1114,7 @@ void print(OpAsmPrinter &p, FunctionLibraryOp op) {
   p.printRegion(op.getOperation()->getRegion(0), /*printEntryBlockArgs=*/false,
                 /*printBlockTerminators=*/false);
   p << " mapping ";
-  p.printAttributeWithoutType(op.mappingAttr());
+  p.printAttributeWithoutType(op.getMappingAttr());
 }
 
 //===----------------------------------------------------------------------===//
@@ -1119,10 +1122,10 @@ void print(OpAsmPrinter &p, FunctionLibraryOp op) {
 //===----------------------------------------------------------------------===//
 
 Optional<int64_t> GetExtentOp::getConstantDim() {
-  if (auto constSizeOp = dim().getDefiningOp<ConstSizeOp>())
-    return constSizeOp.value().getLimitedValue();
-  if (auto constantOp = dim().getDefiningOp<arith::ConstantOp>())
-    return constantOp.value().cast<IntegerAttr>().getInt();
+  if (auto constSizeOp = getDim().getDefiningOp<ConstSizeOp>())
+    return constSizeOp.getValue().getLimitedValue();
+  if (auto constantOp = getDim().getDefiningOp<arith::ConstantOp>())
+    return constantOp.getValue().cast<IntegerAttr>().getInt();
   return llvm::None;
 }
 
@@ -1250,11 +1253,11 @@ struct RankShapeOfCanonicalizationPattern
 
   LogicalResult matchAndRewrite(shape::RankOp op,
                                 PatternRewriter &rewriter) const override {
-    auto shapeOfOp = op.shape().getDefiningOp<ShapeOfOp>();
+    auto shapeOfOp = op.getShape().getDefiningOp<ShapeOfOp>();
     if (!shapeOfOp)
       return failure();
     auto rankedTensorType =
-        shapeOfOp.arg().getType().dyn_cast<RankedTensorType>();
+        shapeOfOp.getArg().getType().dyn_cast<RankedTensorType>();
     if (!rankedTensorType)
       return failure();
     int64_t rank = rankedTensorType.getRank();
@@ -1333,8 +1336,8 @@ bool mlir::shape::NumElementsOp::isCompatibleReturnTypes(TypeRange l,
 
 OpFoldResult MaxOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
   // If operands are equal, just propagate one.
-  if (lhs() == rhs())
-    return lhs();
+  if (getLhs() == getRhs())
+    return getLhs();
   return nullptr;
 }
 
@@ -1365,8 +1368,8 @@ bool mlir::shape::MaxOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
 
 OpFoldResult MinOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
   // If operands are equal, just propagate one.
-  if (lhs() == rhs())
-    return lhs();
+  if (getLhs() == getRhs())
+    return getLhs();
   return nullptr;
 }
 
@@ -1441,12 +1444,13 @@ struct ShapeOfWithTensor : public OpRewritePattern<shape::ShapeOfOp> {
 
   LogicalResult matchAndRewrite(shape::ShapeOfOp op,
                                 PatternRewriter &rewriter) const override {
-    if (!op.arg().getType().isa<ShapedType>())
+    if (!op.getArg().getType().isa<ShapedType>())
       return failure();
     if (op.getType().isa<ShapedType>())
       return failure();
 
-    rewriter.replaceOpWithNewOp<shape::ShapeOfOp>(op.getOperation(), op.arg());
+    rewriter.replaceOpWithNewOp<shape::ShapeOfOp>(op.getOperation(),
+                                                  op.getArg());
     return success();
   }
 };
@@ -1474,11 +1478,11 @@ struct ShapeOfCastExtentTensor : public OpRewritePattern<tensor::CastOp> {
       return failure();
 
     // Argument type must be ranked and must not conflict.
-    auto argTy = shapeOfOp.arg().getType().dyn_cast<RankedTensorType>();
+    auto argTy = shapeOfOp.getArg().getType().dyn_cast<RankedTensorType>();
     if (!argTy || (!ty.isDynamicDim(0) && ty.getDimSize(0) != argTy.getRank()))
       return failure();
 
-    rewriter.replaceOpWithNewOp<ShapeOfOp>(op, ty, shapeOfOp.arg());
+    rewriter.replaceOpWithNewOp<ShapeOfOp>(op, ty, shapeOfOp.getArg());
     return success();
   }
 };
@@ -1634,10 +1638,10 @@ void ReduceOp::build(OpBuilder &builder, OperationState &result, Value shape,
 
 static LogicalResult verify(ReduceOp op) {
   // Verify block arg types.
-  Block &block = op.region().front();
+  Block &block = op.getRegion().front();
 
   // The block takes index, extent, and aggregated values as arguments.
-  auto blockArgsCount = op.initVals().size() + 2;
+  auto blockArgsCount = op.getInitVals().size() + 2;
   if (block.getNumArguments() != blockArgsCount)
     return op.emitOpError() << "ReduceOp body is expected to have "
                             << blockArgsCount << " arguments";
@@ -1651,7 +1655,7 @@ static LogicalResult verify(ReduceOp op) {
   // `index`, depending on whether the reduce operation is applied to a shape or
   // to an extent tensor.
   Type extentTy = block.getArgument(1).getType();
-  if (op.shape().getType().isa<ShapeType>()) {
+  if (op.getShape().getType().isa<ShapeType>()) {
     if (!extentTy.isa<SizeType>())
       return op.emitOpError("argument 1 of ReduceOp body is expected to be of "
                             "SizeType if the ReduceOp operates on a ShapeType");
@@ -1662,7 +1666,7 @@ static LogicalResult verify(ReduceOp op) {
           "ReduceOp operates on an extent tensor");
   }
 
-  for (auto type : llvm::enumerate(op.initVals()))
+  for (auto type : llvm::enumerate(op.getInitVals()))
     if (block.getArgument(type.index() + 2).getType() != type.value().getType())
       return op.emitOpError()
              << "type mismatch between argument " << type.index() + 2
@@ -1701,10 +1705,10 @@ static ParseResult parseReduceOp(OpAsmParser &parser, OperationState &result) {
 }
 
 static void print(OpAsmPrinter &p, ReduceOp op) {
-  p << '(' << op.shape() << ", " << op.initVals()
-    << ") : " << op.shape().getType();
+  p << '(' << op.getShape() << ", " << op.getInitVals()
+    << ") : " << op.getShape().getType();
   p.printOptionalArrowTypeList(op.getResultTypes());
-  p.printRegion(op.region());
+  p.printRegion(op.getRegion());
   p.printOptionalAttrDict(op->getAttrs());
 }
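
Reading-side sketch for the shape dialect flip, not in the diff: the
extent-gathering logic in getShapeVec() above now goes through getShape().
Below is a trimmed-down version for shape.const_shape producers only; the
helper name is hypothetical.

  #include "mlir/Dialect/Shape/IR/Shape.h"
  #include "llvm/ADT/SmallVector.h"

  // Collect the static extents of a shape produced by shape.const_shape.
  static bool getConstExtents(mlir::Value shape,
                              llvm::SmallVectorImpl<int64_t> &extents) {
    auto constShape = shape.getDefiningOp<mlir::shape::ConstShapeOp>();
    if (!constShape)
      return false;
    // getShape() is the prefixed accessor for the op's elements attribute.
    for (int64_t extent : constShape.getShape().getValues<int64_t>())
      extents.push_back(extent);
    return true;
  }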
 

diff --git a/mlir/lib/Dialect/Shape/Transforms/ShapeToShapeLowering.cpp b/mlir/lib/Dialect/Shape/Transforms/ShapeToShapeLowering.cpp
index a6549dad779df..3c1b84023ce94 100644
--- a/mlir/lib/Dialect/Shape/Transforms/ShapeToShapeLowering.cpp
+++ b/mlir/lib/Dialect/Shape/Transforms/ShapeToShapeLowering.cpp
@@ -39,7 +39,7 @@ NumElementsOpConverter::matchAndRewrite(NumElementsOp op,
                    ->materializeConstant(rewriter, rewriter.getIndexAttr(1),
                                          valueType, loc)
                    ->getResult(0);
-  ReduceOp reduce = rewriter.create<ReduceOp>(loc, op.shape(), init);
+  ReduceOp reduce = rewriter.create<ReduceOp>(loc, op.getShape(), init);
 
   // Generate reduce operator.
   Block *body = reduce.getBody();
@@ -48,7 +48,7 @@ NumElementsOpConverter::matchAndRewrite(NumElementsOp op,
                                   body->getArgument(2));
   b.create<shape::YieldOp>(loc, product);
 
-  rewriter.replaceOp(op, reduce.result());
+  rewriter.replaceOp(op, reduce.getResult());
   return success();
 }
 

diff --git a/mlir/lib/Dialect/Shape/Transforms/StructuralTypeConversions.cpp b/mlir/lib/Dialect/Shape/Transforms/StructuralTypeConversions.cpp
index 008164aac95ba..e368eca78a649 100644
--- a/mlir/lib/Dialect/Shape/Transforms/StructuralTypeConversions.cpp
+++ b/mlir/lib/Dialect/Shape/Transforms/StructuralTypeConversions.cpp
@@ -30,10 +30,10 @@ class ConvertAssumingOpTypes : public OpConversionPattern<AssumingOp> {
       newResultTypes.push_back(convertedType);
     }
 
-    auto newAssumingOp =
-        rewriter.create<AssumingOp>(op.getLoc(), newResultTypes, op.witness());
-    rewriter.inlineRegionBefore(op.doRegion(), newAssumingOp.doRegion(),
-                                newAssumingOp.doRegion().end());
+    auto newAssumingOp = rewriter.create<AssumingOp>(
+        op.getLoc(), newResultTypes, op.getWitness());
+    rewriter.inlineRegionBefore(op.getDoRegion(), newAssumingOp.getDoRegion(),
+                                newAssumingOp.getDoRegion().end());
     rewriter.replaceOp(op, newAssumingOp.getResults());
 
     return success();

diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index bb499be5052a3..3b123545baf1c 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -193,7 +193,7 @@ mlir::sparse_tensor::getSparseTensorEncoding(Type type) {
 
 static LogicalResult isInBounds(Value dim, Value tensor) {
   if (auto constantOp = dim.getDefiningOp<arith::ConstantOp>()) {
-    unsigned d = constantOp.value().cast<IntegerAttr>().getInt();
+    unsigned d = constantOp.getValue().cast<IntegerAttr>().getInt();
     if (d >= tensor.getType().cast<RankedTensorType>().getRank())
       return failure();
   }
@@ -227,7 +227,7 @@ static LogicalResult verify(InitOp op) {
       continue;
     auto constantOp = op.sizes()[i].getDefiningOp<arith::ConstantOp>();
     if (!constantOp ||
-        constantOp.value().cast<IntegerAttr>().getInt() != shape[i])
+        constantOp.getValue().cast<IntegerAttr>().getInt() != shape[i])
       return op.emitError("unexpected mismatch with static dimension size ")
              << shape[i];
   }

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index db6b8d2548f06..4b122aec293d3 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -351,7 +351,7 @@ static Optional<std::pair<Value, Value>>
 genSplitSparseConstant(ConversionPatternRewriter &rewriter, Location loc,
                        Value tensor) {
   if (auto constOp = tensor.getDefiningOp<arith::ConstantOp>()) {
-    if (auto attr = constOp.value().dyn_cast<SparseElementsAttr>()) {
+    if (auto attr = constOp.getValue().dyn_cast<SparseElementsAttr>()) {
       DenseElementsAttr indicesAttr = attr.getIndices();
       Value indices = rewriter.create<arith::ConstantOp>(loc, indicesAttr);
       DenseElementsAttr valuesAttr = attr.getValues();

diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 81678c912e48e..9b6052bcc2902 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -146,7 +146,7 @@ Operation *StandardOpsDialect::materializeConstant(OpBuilder &builder,
 
 LogicalResult AssertOp::canonicalize(AssertOp op, PatternRewriter &rewriter) {
   // Erase assertion if argument is constant true.
-  if (matchPattern(op.arg(), m_One())) {
+  if (matchPattern(op.getArg(), m_One())) {
     rewriter.eraseOp(op);
     return success();
   }
@@ -161,14 +161,14 @@ static LogicalResult verify(AtomicRMWOp op) {
   if (op.getMemRefType().getRank() != op.getNumOperands() - 2)
     return op.emitOpError(
         "expects the number of subscripts to be equal to memref rank");
-  switch (op.kind()) {
+  switch (op.getKind()) {
   case AtomicRMWKind::addf:
   case AtomicRMWKind::maxf:
   case AtomicRMWKind::minf:
   case AtomicRMWKind::mulf:
-    if (!op.value().getType().isa<FloatType>())
+    if (!op.getValue().getType().isa<FloatType>())
       return op.emitOpError()
-             << "with kind '" << stringifyAtomicRMWKind(op.kind())
+             << "with kind '" << stringifyAtomicRMWKind(op.getKind())
              << "' expects a floating-point type";
     break;
   case AtomicRMWKind::addi:
@@ -177,9 +177,9 @@ static LogicalResult verify(AtomicRMWOp op) {
   case AtomicRMWKind::mins:
   case AtomicRMWKind::minu:
   case AtomicRMWKind::muli:
-    if (!op.value().getType().isa<IntegerType>())
+    if (!op.getValue().getType().isa<IntegerType>())
       return op.emitOpError()
-             << "with kind '" << stringifyAtomicRMWKind(op.kind())
+             << "with kind '" << stringifyAtomicRMWKind(op.getKind())
              << "' expects an integer type";
     break;
   default:
@@ -308,7 +308,7 @@ void GenericAtomicRMWOp::build(OpBuilder &builder, OperationState &result,
 }
 
 static LogicalResult verify(GenericAtomicRMWOp op) {
-  auto &body = op.body();
+  auto &body = op.getRegion();
   if (body.getNumArguments() != 1)
     return op.emitOpError("expected single number of entry block arguments");
 
@@ -351,9 +351,9 @@ static ParseResult parseGenericAtomicRMWOp(OpAsmParser &parser,
 }
 
 static void print(OpAsmPrinter &p, GenericAtomicRMWOp op) {
-  p << ' ' << op.memref() << "[" << op.indices()
-    << "] : " << op.memref().getType();
-  p.printRegion(op.body());
+  p << ' ' << op.getMemref() << "[" << op.getIndices()
+    << "] : " << op.getMemref().getType();
+  p.printRegion(op.getRegion());
   p.printOptionalAttrDict(op->getAttrs());
 }
 
@@ -363,7 +363,7 @@ static void print(OpAsmPrinter &p, GenericAtomicRMWOp op) {
 
 static LogicalResult verify(AtomicYieldOp op) {
   Type parentType = op->getParentOp()->getResultTypes().front();
-  Type resultType = op.result().getType();
+  Type resultType = op.getResult().getType();
   if (parentType != resultType)
     return op.emitOpError() << "types mismatch between yield op: " << resultType
                             << " and its parent: " << parentType;
@@ -467,8 +467,6 @@ LogicalResult BranchOp::canonicalize(BranchOp op, PatternRewriter &rewriter) {
                  succeeded(simplifyPassThroughBr(op, rewriter)));
 }
 
-Block *BranchOp::getDest() { return getSuccessor(); }
-
 void BranchOp::setDest(Block *block) { return setSuccessor(block); }
 
 void BranchOp::eraseOperand(unsigned index) { (*this)->eraseOperand(index); }
@@ -476,10 +474,12 @@ void BranchOp::eraseOperand(unsigned index) { (*this)->eraseOperand(index); }
 Optional<MutableOperandRange>
 BranchOp::getMutableSuccessorOperands(unsigned index) {
   assert(index == 0 && "invalid successor index");
-  return destOperandsMutable();
+  return getDestOperandsMutable();
 }
 
-Block *BranchOp::getSuccessorForOperands(ArrayRef<Attribute>) { return dest(); }
+Block *BranchOp::getSuccessorForOperands(ArrayRef<Attribute>) {
+  return getDest();
+}
 
 //===----------------------------------------------------------------------===//
 // CallOp
@@ -602,7 +602,7 @@ struct SimplifyPassThroughCondBranch : public OpRewritePattern<CondBranchOp> {
 
   LogicalResult matchAndRewrite(CondBranchOp condbr,
                                 PatternRewriter &rewriter) const override {
-    Block *trueDest = condbr.trueDest(), *falseDest = condbr.falseDest();
+    Block *trueDest = condbr.getTrueDest(), *falseDest = condbr.getFalseDest();
     ValueRange trueDestOperands = condbr.getTrueOperands();
     ValueRange falseDestOperands = condbr.getFalseOperands();
     SmallVector<Value, 4> trueDestOperandStorage, falseDestOperandStorage;
@@ -638,8 +638,8 @@ struct SimplifyCondBranchIdenticalSuccessors
                                 PatternRewriter &rewriter) const override {
     // Check that the true and false destinations are the same and have the same
     // operands.
-    Block *trueDest = condbr.trueDest();
-    if (trueDest != condbr.falseDest())
+    Block *trueDest = condbr.getTrueDest();
+    if (trueDest != condbr.getFalseDest())
       return failure();
 
     // If all of the operands match, no selects need to be generated.
@@ -707,12 +707,12 @@ struct SimplifyCondBranchFromCondBranchOnSameCondition
       return failure();
 
     // Fold this branch to an unconditional branch.
-    if (currentBlock == predBranch.trueDest())
-      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.trueDest(),
-                                            condbr.trueDestOperands());
+    if (currentBlock == predBranch.getTrueDest())
+      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getTrueDest(),
+                                            condbr.getTrueDestOperands());
     else
-      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.falseDest(),
-                                            condbr.falseDestOperands());
+      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getFalseDest(),
+                                            condbr.getFalseDestOperands());
     return success();
   }
 };
@@ -758,7 +758,7 @@ struct CondBranchTruthPropagation : public OpRewritePattern<CondBranchOp> {
     // op.
     if (condbr.getTrueDest()->getSinglePredecessor()) {
       for (OpOperand &use :
-           llvm::make_early_inc_range(condbr.condition().getUses())) {
+           llvm::make_early_inc_range(condbr.getCondition().getUses())) {
         if (use.getOwner()->getBlock() == condbr.getTrueDest()) {
           replaced = true;
 
@@ -773,7 +773,7 @@ struct CondBranchTruthPropagation : public OpRewritePattern<CondBranchOp> {
     }
     if (condbr.getFalseDest()->getSinglePredecessor()) {
       for (OpOperand &use :
-           llvm::make_early_inc_range(condbr.condition().getUses())) {
+           llvm::make_early_inc_range(condbr.getCondition().getUses())) {
         if (use.getOwner()->getBlock() == condbr.getFalseDest()) {
           replaced = true;
 
@@ -802,13 +802,13 @@ void CondBranchOp::getCanonicalizationPatterns(RewritePatternSet &results,
 Optional<MutableOperandRange>
 CondBranchOp::getMutableSuccessorOperands(unsigned index) {
   assert(index < getNumSuccessors() && "invalid successor index");
-  return index == trueIndex ? trueDestOperandsMutable()
-                            : falseDestOperandsMutable();
+  return index == trueIndex ? getTrueDestOperandsMutable()
+                            : getFalseDestOperandsMutable();
 }
 
 Block *CondBranchOp::getSuccessorForOperands(ArrayRef<Attribute> operands) {
   if (IntegerAttr condAttr = operands.front().dyn_cast_or_null<IntegerAttr>())
-    return condAttr.getValue().isOneValue() ? trueDest() : falseDest();
+    return condAttr.getValue().isOneValue() ? getTrueDest() : getFalseDest();
   return nullptr;
 }
 
@@ -947,19 +947,19 @@ OpFoldResult MaxSIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
 
   // maxsi(x,x) -> x
-  if (lhs() == rhs())
-    return rhs();
+  if (getLhs() == getRhs())
+    return getRhs();
 
   APInt intValue;
   // maxsi(x,MAX_INT) -> MAX_INT
-  if (matchPattern(rhs(), m_ConstantInt(&intValue)) &&
+  if (matchPattern(getRhs(), m_ConstantInt(&intValue)) &&
       intValue.isMaxSignedValue())
-    return rhs();
+    return getRhs();
 
   // maxsi(x, MIN_INT) -> x
-  if (matchPattern(rhs(), m_ConstantInt(&intValue)) &&
+  if (matchPattern(getRhs(), m_ConstantInt(&intValue)) &&
       intValue.isMinSignedValue())
-    return lhs();
+    return getLhs();
 
   return constFoldBinaryOp<IntegerAttr>(
       operands, [](APInt a, APInt b) { return llvm::APIntOps::smax(a, b); });
@@ -973,17 +973,17 @@ OpFoldResult MaxUIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
 
   // maxui(x,x) -> x
-  if (lhs() == rhs())
-    return rhs();
+  if (getLhs() == getRhs())
+    return getRhs();
 
   APInt intValue;
   // maxui(x,MAX_INT) -> MAX_INT
-  if (matchPattern(rhs(), m_ConstantInt(&intValue)) && intValue.isMaxValue())
-    return rhs();
+  if (matchPattern(getRhs(), m_ConstantInt(&intValue)) && intValue.isMaxValue())
+    return getRhs();
 
   // maxui(x, MIN_INT) -> x
-  if (matchPattern(rhs(), m_ConstantInt(&intValue)) && intValue.isMinValue())
-    return lhs();
+  if (matchPattern(getRhs(), m_ConstantInt(&intValue)) && intValue.isMinValue())
+    return getLhs();
 
   return constFoldBinaryOp<IntegerAttr>(
       operands, [](APInt a, APInt b) { return llvm::APIntOps::umax(a, b); });
@@ -997,19 +997,19 @@ OpFoldResult MinSIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
 
   // minsi(x,x) -> x
-  if (lhs() == rhs())
-    return rhs();
+  if (getLhs() == getRhs())
+    return getRhs();
 
   APInt intValue;
   // minsi(x,MIN_INT) -> MIN_INT
-  if (matchPattern(rhs(), m_ConstantInt(&intValue)) &&
+  if (matchPattern(getRhs(), m_ConstantInt(&intValue)) &&
       intValue.isMinSignedValue())
-    return rhs();
+    return getRhs();
 
   // minsi(x, MAX_INT) -> x
-  if (matchPattern(rhs(), m_ConstantInt(&intValue)) &&
+  if (matchPattern(getRhs(), m_ConstantInt(&intValue)) &&
       intValue.isMaxSignedValue())
-    return lhs();
+    return getLhs();
 
   return constFoldBinaryOp<IntegerAttr>(
       operands, [](APInt a, APInt b) { return llvm::APIntOps::smin(a, b); });
@@ -1023,17 +1023,17 @@ OpFoldResult MinUIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
 
   // minui(x,x) -> x
-  if (lhs() == rhs())
-    return rhs();
+  if (getLhs() == getRhs())
+    return getRhs();
 
   APInt intValue;
   // minui(x,MIN_INT) -> MIN_INT
-  if (matchPattern(rhs(), m_ConstantInt(&intValue)) && intValue.isMinValue())
-    return rhs();
+  if (matchPattern(getRhs(), m_ConstantInt(&intValue)) && intValue.isMinValue())
+    return getRhs();
 
   // minui(x, MAX_INT) -> x
-  if (matchPattern(rhs(), m_ConstantInt(&intValue)) && intValue.isMaxValue())
-    return lhs();
+  if (matchPattern(getRhs(), m_ConstantInt(&intValue)) && intValue.isMaxValue())
+    return getLhs();
 
   return constFoldBinaryOp<IntegerAttr>(
       operands, [](APInt a, APInt b) { return llvm::APIntOps::umin(a, b); });
@@ -1103,7 +1103,7 @@ struct SelectToNot : public OpRewritePattern<SelectOp> {
     if (!op.getType().isInteger(1))
       return failure();
 
-    rewriter.replaceOpWithNewOp<arith::XOrIOp>(op, op.condition(),
+    rewriter.replaceOpWithNewOp<arith::XOrIOp>(op, op.getCondition(),
                                                op.getFalseValue());
     return success();
   }
@@ -1131,10 +1131,10 @@ OpFoldResult SelectOp::fold(ArrayRef<Attribute> operands) {
     return falseVal;
 
   if (auto cmp = dyn_cast_or_null<arith::CmpIOp>(condition.getDefiningOp())) {
-    auto pred = cmp.predicate();
+    auto pred = cmp.getPredicate();
     if (pred == arith::CmpIPredicate::eq || pred == arith::CmpIPredicate::ne) {
-      auto cmpLhs = cmp.lhs();
-      auto cmpRhs = cmp.rhs();
+      auto cmpLhs = cmp.getLhs();
+      auto cmpRhs = cmp.getRhs();
 
       // %0 = arith.cmpi eq, %arg0, %arg1
       // %1 = select %0, %arg0, %arg1 => %arg1
@@ -1334,13 +1334,13 @@ static void printSwitchOpCases(
 }
 
 static LogicalResult verify(SwitchOp op) {
-  auto caseValues = op.case_values();
-  auto caseDestinations = op.caseDestinations();
+  auto caseValues = op.getCaseValues();
+  auto caseDestinations = op.getCaseDestinations();
 
   if (!caseValues && caseDestinations.empty())
     return success();
 
-  Type flagType = op.flag().getType();
+  Type flagType = op.getFlag().getType();
   Type caseValueType = caseValues->getType().getElementType();
   if (caseValueType != flagType)
     return op.emitOpError()
@@ -1359,22 +1359,22 @@ static LogicalResult verify(SwitchOp op) {
 Optional<MutableOperandRange>
 SwitchOp::getMutableSuccessorOperands(unsigned index) {
   assert(index < getNumSuccessors() && "invalid successor index");
-  return index == 0 ? defaultOperandsMutable()
+  return index == 0 ? getDefaultOperandsMutable()
                     : getCaseOperandsMutable(index - 1);
 }
 
 Block *SwitchOp::getSuccessorForOperands(ArrayRef<Attribute> operands) {
-  Optional<DenseIntElementsAttr> caseValues = case_values();
+  Optional<DenseIntElementsAttr> caseValues = getCaseValues();
 
   if (!caseValues)
-    return defaultDestination();
+    return getDefaultDestination();
 
-  SuccessorRange caseDests = caseDestinations();
+  SuccessorRange caseDests = getCaseDestinations();
   if (auto value = operands.front().dyn_cast_or_null<IntegerAttr>()) {
-    for (int64_t i = 0, size = case_values()->size(); i < size; ++i)
+    for (int64_t i = 0, size = getCaseValues()->size(); i < size; ++i)
       if (value == caseValues->getValue<IntegerAttr>(i))
         return caseDests[i];
-    return defaultDestination();
+    return getDefaultDestination();
   }
   return nullptr;
 }
@@ -1385,11 +1385,11 @@ Block *SwitchOp::getSuccessorForOperands(ArrayRef<Attribute> operands) {
 ///  -> br ^bb1
 static LogicalResult simplifySwitchWithOnlyDefault(SwitchOp op,
                                                    PatternRewriter &rewriter) {
-  if (!op.caseDestinations().empty())
+  if (!op.getCaseDestinations().empty())
     return failure();
 
-  rewriter.replaceOpWithNewOp<BranchOp>(op, op.defaultDestination(),
-                                        op.defaultOperands());
+  rewriter.replaceOpWithNewOp<BranchOp>(op, op.getDefaultDestination(),
+                                        op.getDefaultOperands());
   return success();
 }
 
@@ -1409,12 +1409,12 @@ dropSwitchCasesThatMatchDefault(SwitchOp op, PatternRewriter &rewriter) {
   SmallVector<ValueRange> newCaseOperands;
   SmallVector<APInt> newCaseValues;
   bool requiresChange = false;
-  auto caseValues = op.case_values();
-  auto caseDests = op.caseDestinations();
+  auto caseValues = op.getCaseValues();
+  auto caseDests = op.getCaseDestinations();
 
   for (int64_t i = 0, size = caseValues->size(); i < size; ++i) {
-    if (caseDests[i] == op.defaultDestination() &&
-        op.getCaseOperands(i) == op.defaultOperands()) {
+    if (caseDests[i] == op.getDefaultDestination() &&
+        op.getCaseOperands(i) == op.getDefaultOperands()) {
       requiresChange = true;
       continue;
     }
@@ -1426,9 +1426,9 @@ dropSwitchCasesThatMatchDefault(SwitchOp op, PatternRewriter &rewriter) {
   if (!requiresChange)
     return failure();
 
-  rewriter.replaceOpWithNewOp<SwitchOp>(op, op.flag(), op.defaultDestination(),
-                                        op.defaultOperands(), newCaseValues,
-                                        newCaseDestinations, newCaseOperands);
+  rewriter.replaceOpWithNewOp<SwitchOp>(
+      op, op.getFlag(), op.getDefaultDestination(), op.getDefaultOperands(),
+      newCaseValues, newCaseDestinations, newCaseOperands);
   return success();
 }
 
@@ -1441,16 +1441,16 @@ dropSwitchCasesThatMatchDefault(SwitchOp op, PatternRewriter &rewriter) {
 /// -> br ^bb2
 static void foldSwitch(SwitchOp op, PatternRewriter &rewriter,
                        APInt caseValue) {
-  auto caseValues = op.case_values();
+  auto caseValues = op.getCaseValues();
   for (int64_t i = 0, size = caseValues->size(); i < size; ++i) {
     if (caseValues->getValue<APInt>(i) == caseValue) {
-      rewriter.replaceOpWithNewOp<BranchOp>(op, op.caseDestinations()[i],
+      rewriter.replaceOpWithNewOp<BranchOp>(op, op.getCaseDestinations()[i],
                                             op.getCaseOperands(i));
       return;
     }
   }
-  rewriter.replaceOpWithNewOp<BranchOp>(op, op.defaultDestination(),
-                                        op.defaultOperands());
+  rewriter.replaceOpWithNewOp<BranchOp>(op, op.getDefaultDestination(),
+                                        op.getDefaultOperands());
 }
 
 /// switch %c_42 : i32, [
@@ -1462,7 +1462,7 @@ static void foldSwitch(SwitchOp op, PatternRewriter &rewriter,
 static LogicalResult simplifyConstSwitchValue(SwitchOp op,
                                               PatternRewriter &rewriter) {
   APInt caseValue;
-  if (!matchPattern(op.flag(), m_ConstantInt(&caseValue)))
+  if (!matchPattern(op.getFlag(), m_ConstantInt(&caseValue)))
     return failure();
 
   foldSwitch(op, rewriter, caseValue);
@@ -1485,8 +1485,8 @@ static LogicalResult simplifyPassThroughSwitch(SwitchOp op,
   SmallVector<Block *> newCaseDests;
   SmallVector<ValueRange> newCaseOperands;
   SmallVector<SmallVector<Value>> argStorage;
-  auto caseValues = op.case_values();
-  auto caseDests = op.caseDestinations();
+  auto caseValues = op.getCaseValues();
+  auto caseDests = op.getCaseDestinations();
   bool requiresChange = false;
   for (int64_t i = 0, size = caseValues->size(); i < size; ++i) {
     Block *caseDest = caseDests[i];
@@ -1499,8 +1499,8 @@ static LogicalResult simplifyPassThroughSwitch(SwitchOp op,
     newCaseOperands.push_back(caseOperands);
   }
 
-  Block *defaultDest = op.defaultDestination();
-  ValueRange defaultOperands = op.defaultOperands();
+  Block *defaultDest = op.getDefaultDestination();
+  ValueRange defaultOperands = op.getDefaultOperands();
   argStorage.emplace_back();
 
   if (succeeded(
@@ -1510,7 +1510,7 @@ static LogicalResult simplifyPassThroughSwitch(SwitchOp op,
   if (!requiresChange)
     return failure();
 
-  rewriter.replaceOpWithNewOp<SwitchOp>(op, op.flag(), defaultDest,
+  rewriter.replaceOpWithNewOp<SwitchOp>(op, op.getFlag(), defaultDest,
                                         defaultOperands, caseValues.getValue(),
                                         newCaseDests, newCaseOperands);
   return success();
@@ -1564,15 +1564,15 @@ simplifySwitchFromSwitchOnSameCondition(SwitchOp op,
   // and that it branches on the same condition and that this branch isn't the
   // default destination.
   auto predSwitch = dyn_cast<SwitchOp>(predecessor->getTerminator());
-  if (!predSwitch || op.flag() != predSwitch.flag() ||
-      predSwitch.defaultDestination() == currentBlock)
+  if (!predSwitch || op.getFlag() != predSwitch.getFlag() ||
+      predSwitch.getDefaultDestination() == currentBlock)
     return failure();
 
   // Fold this switch to an unconditional branch.
   APInt caseValue;
   bool isDefault = true;
-  SuccessorRange predDests = predSwitch.caseDestinations();
-  Optional<DenseIntElementsAttr> predCaseValues = predSwitch.case_values();
+  SuccessorRange predDests = predSwitch.getCaseDestinations();
+  Optional<DenseIntElementsAttr> predCaseValues = predSwitch.getCaseValues();
   for (int64_t i = 0, size = predCaseValues->size(); i < size; ++i) {
     if (currentBlock == predDests[i]) {
       caseValue = predCaseValues->getValue<APInt>(i);
@@ -1581,8 +1581,8 @@ simplifySwitchFromSwitchOnSameCondition(SwitchOp op,
     }
   }
   if (isDefault)
-    rewriter.replaceOpWithNewOp<BranchOp>(op, op.defaultDestination(),
-                                          op.defaultOperands());
+    rewriter.replaceOpWithNewOp<BranchOp>(op, op.getDefaultDestination(),
+                                          op.getDefaultOperands());
   else
     foldSwitch(op, rewriter, caseValue);
   return success();
@@ -1621,14 +1621,14 @@ simplifySwitchFromDefaultSwitchOnSameCondition(SwitchOp op,
   // and that it branches on the same condition and that this branch is the
   // default destination.
   auto predSwitch = dyn_cast<SwitchOp>(predecessor->getTerminator());
-  if (!predSwitch || op.flag() != predSwitch.flag() ||
-      predSwitch.defaultDestination() != currentBlock)
+  if (!predSwitch || op.getFlag() != predSwitch.getFlag() ||
+      predSwitch.getDefaultDestination() != currentBlock)
     return failure();
 
   // Delete case values that are not possible here.
   DenseSet<APInt> caseValuesToRemove;
-  auto predDests = predSwitch.caseDestinations();
-  auto predCaseValues = predSwitch.case_values();
+  auto predDests = predSwitch.getCaseDestinations();
+  auto predCaseValues = predSwitch.getCaseValues();
   for (int64_t i = 0, size = predCaseValues->size(); i < size; ++i)
     if (currentBlock != predDests[i])
       caseValuesToRemove.insert(predCaseValues->getValue<APInt>(i));
@@ -1638,8 +1638,8 @@ simplifySwitchFromDefaultSwitchOnSameCondition(SwitchOp op,
   SmallVector<APInt> newCaseValues;
   bool requiresChange = false;
 
-  auto caseValues = op.case_values();
-  auto caseDests = op.caseDestinations();
+  auto caseValues = op.getCaseValues();
+  auto caseDests = op.getCaseDestinations();
   for (int64_t i = 0, size = caseValues->size(); i < size; ++i) {
     if (caseValuesToRemove.contains(caseValues->getValue<APInt>(i))) {
       requiresChange = true;
@@ -1653,9 +1653,9 @@ simplifySwitchFromDefaultSwitchOnSameCondition(SwitchOp op,
   if (!requiresChange)
     return failure();
 
-  rewriter.replaceOpWithNewOp<SwitchOp>(op, op.flag(), op.defaultDestination(),
-                                        op.defaultOperands(), newCaseValues,
-                                        newCaseDestinations, newCaseOperands);
+  rewriter.replaceOpWithNewOp<SwitchOp>(
+      op, op.getFlag(), op.getDefaultDestination(), op.getDefaultOperands(),
+      newCaseValues, newCaseDestinations, newCaseOperands);
   return success();
 }
 

diff  --git a/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp b/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp
index 01b6a55be607b..1c7eb609c9d08 100644
--- a/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp
@@ -29,11 +29,12 @@ class BufferizeSelectOp : public OpConversionPattern<SelectOp> {
   LogicalResult
   matchAndRewrite(SelectOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    if (!op.condition().getType().isa<IntegerType>())
+    if (!op.getCondition().getType().isa<IntegerType>())
       return rewriter.notifyMatchFailure(op, "requires scalar condition");
 
-    rewriter.replaceOpWithNewOp<SelectOp>(
-        op, adaptor.condition(), adaptor.true_value(), adaptor.false_value());
+    rewriter.replaceOpWithNewOp<SelectOp>(op, adaptor.getCondition(),
+                                          adaptor.getTrueValue(),
+                                          adaptor.getFalseValue());
     return success();
   }
 };
@@ -61,7 +62,7 @@ struct StdBufferizePass : public StdBufferizeBase<StdBufferizePass> {
     // touch the data).
     target.addDynamicallyLegalOp<SelectOp>([&](SelectOp op) {
       return typeConverter.isLegal(op.getType()) ||
-             !op.condition().getType().isa<IntegerType>();
+             !op.getCondition().getType().isa<IntegerType>();
     });
     if (failed(
             applyPartialConversion(getFunction(), target, std::move(patterns))))

diff  --git a/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp
index 085342f2bc9d7..25bac1ad8466c 100644
--- a/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp
+++ b/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp
@@ -46,7 +46,7 @@ struct AtomicRMWOpConverter : public OpRewritePattern<AtomicRMWOp> {
   LogicalResult matchAndRewrite(AtomicRMWOp op,
                                 PatternRewriter &rewriter) const final {
     arith::CmpFPredicate predicate;
-    switch (op.kind()) {
+    switch (op.getKind()) {
     case AtomicRMWKind::maxf:
       predicate = arith::CmpFPredicate::OGT;
       break;
@@ -58,13 +58,13 @@ struct AtomicRMWOpConverter : public OpRewritePattern<AtomicRMWOp> {
     }
 
     auto loc = op.getLoc();
-    auto genericOp =
-        rewriter.create<GenericAtomicRMWOp>(loc, op.memref(), op.indices());
+    auto genericOp = rewriter.create<GenericAtomicRMWOp>(loc, op.getMemref(),
+                                                         op.getIndices());
     OpBuilder bodyBuilder =
         OpBuilder::atBlockEnd(genericOp.getBody(), rewriter.getListener());
 
     Value lhs = genericOp.getCurrentValue();
-    Value rhs = op.value();
+    Value rhs = op.getValue();
     Value cmp = bodyBuilder.create<arith::CmpFOp>(loc, predicate, lhs, rhs);
     Value select = bodyBuilder.create<SelectOp>(loc, cmp, lhs, rhs);
     bodyBuilder.create<AtomicYieldOp>(loc, select);
@@ -126,8 +126,8 @@ struct MaxMinFOpConverter : public OpRewritePattern<OpTy> {
 
   LogicalResult matchAndRewrite(OpTy op,
                                 PatternRewriter &rewriter) const final {
-    Value lhs = op.lhs();
-    Value rhs = op.rhs();
+    Value lhs = op.getLhs();
+    Value rhs = op.getRhs();
 
     Location loc = op.getLoc();
     Value cmp = rewriter.create<arith::CmpFOp>(loc, pred, lhs, rhs);
@@ -153,8 +153,8 @@ struct MaxMinIOpConverter : public OpRewritePattern<OpTy> {
   using OpRewritePattern<OpTy>::OpRewritePattern;
   LogicalResult matchAndRewrite(OpTy op,
                                 PatternRewriter &rewriter) const final {
-    Value lhs = op.lhs();
-    Value rhs = op.rhs();
+    Value lhs = op.getLhs();
+    Value rhs = op.getRhs();
 
     Location loc = op.getLoc();
     Value cmp = rewriter.create<arith::CmpIOp>(loc, pred, lhs, rhs);
@@ -177,8 +177,8 @@ struct StdExpandOpsPass : public StdExpandOpsBase<StdExpandOpsPass> {
                            StandardOpsDialect>();
     target.addIllegalOp<arith::CeilDivSIOp, arith::FloorDivSIOp>();
     target.addDynamicallyLegalOp<AtomicRMWOp>([](AtomicRMWOp op) {
-      return op.kind() != AtomicRMWKind::maxf &&
-             op.kind() != AtomicRMWKind::minf;
+      return op.getKind() != AtomicRMWKind::maxf &&
+             op.getKind() != AtomicRMWKind::minf;
     });
     target.addDynamicallyLegalOp<memref::ReshapeOp>([](memref::ReshapeOp op) {
       return !op.shape().getType().cast<MemRefType>().hasStaticShape();

diff  --git a/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp b/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp
index 8756fcf43dc02..58dda4860cca7 100644
--- a/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp
+++ b/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp
@@ -31,7 +31,7 @@ struct CallOpSignatureConversion : public OpConversionPattern<CallOp> {
     // Substitute with the new result types from the corresponding FuncType
     // conversion.
     rewriter.replaceOpWithNewOp<CallOp>(
-        callOp, callOp.callee(), convertedResults, adaptor.getOperands());
+        callOp, callOp.getCallee(), convertedResults, adaptor.getOperands());
     return success();
   }
 };

diff  --git a/mlir/lib/Dialect/StandardOps/Transforms/TensorConstantBufferize.cpp b/mlir/lib/Dialect/StandardOps/Transforms/TensorConstantBufferize.cpp
index 61f86090ab804..7c921774b84b9 100644
--- a/mlir/lib/Dialect/StandardOps/Transforms/TensorConstantBufferize.cpp
+++ b/mlir/lib/Dialect/StandardOps/Transforms/TensorConstantBufferize.cpp
@@ -28,7 +28,7 @@ memref::GlobalOp GlobalCreator::getGlobalFor(arith::ConstantOp constantOp) {
 
   // If we already have a global for this constant value, no need to do
   // anything else.
-  auto it = globals.find(constantOp.value());
+  auto it = globals.find(constantOp.getValue());
   if (it != globals.end())
     return cast<memref::GlobalOp>(it->second);
 
@@ -52,14 +52,14 @@ memref::GlobalOp GlobalCreator::getGlobalFor(arith::ConstantOp constantOp) {
       constantOp.getLoc(), (Twine("__constant_") + os.str()).str(),
       /*sym_visibility=*/globalBuilder.getStringAttr("private"),
       /*type=*/typeConverter.convertType(type).cast<MemRefType>(),
-      /*initial_value=*/constantOp.value().cast<ElementsAttr>(),
+      /*initial_value=*/constantOp.getValue().cast<ElementsAttr>(),
       /*constant=*/true,
       /*alignment=*/memrefAlignment);
   symbolTable.insert(global);
   // The symbol table inserts at the end of the module, but globals are a bit
   // nicer if they are at the beginning.
   global->moveBefore(&moduleOp.front());
-  globals[constantOp.value()] = global;
+  globals[constantOp.getValue()] = global;
   return global;
 }
 

diff  --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 1a278a54910a9..287289b4cd6d3 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -218,7 +218,7 @@ void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
 
 Optional<int64_t> DimOp::getConstantIndex() {
   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
-    return constantOp.value().cast<IntegerAttr>().getInt();
+    return constantOp.getValue().cast<IntegerAttr>().getInt();
   return {};
 }
 

diff  --git a/mlir/lib/Dialect/Vector/VectorOps.cpp b/mlir/lib/Dialect/Vector/VectorOps.cpp
index b0ba6a8f94fb5..e2268569d5d43 100644
--- a/mlir/lib/Dialect/Vector/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/VectorOps.cpp
@@ -57,7 +57,7 @@ static MaskFormat get1DMaskFormat(Value mask) {
     // Inspect constant dense values. We count up for bits that
     // are set, count down for bits that are cleared, and bail
     // when a mix is detected.
-    if (auto denseElts = c.value().dyn_cast<DenseIntElementsAttr>()) {
+    if (auto denseElts = c.getValue().dyn_cast<DenseIntElementsAttr>()) {
       int64_t val = 0;
       for (bool b : denseElts.getValues<bool>())
         if (b && val >= 0)
@@ -790,7 +790,7 @@ struct CanonicalizeContractAdd : public OpRewritePattern<AddOpType> {
         return vector::ContractionOp();
       if (auto maybeZero = dyn_cast_or_null<arith::ConstantOp>(
               contractionOp.acc().getDefiningOp())) {
-        if (maybeZero.value() ==
+        if (maybeZero.getValue() ==
             rewriter.getZeroAttr(contractionOp.acc().getType())) {
           BlockAndValueMapping bvm;
           bvm.map(contractionOp.acc(), otherOperand);
@@ -2193,7 +2193,7 @@ class StridedSliceConstantFolder final
         extractStridedSliceOp.vector().getDefiningOp<arith::ConstantOp>();
     if (!constantOp)
       return failure();
-    auto dense = constantOp.value().dyn_cast<SplatElementsAttr>();
+    auto dense = constantOp.getValue().dyn_cast<SplatElementsAttr>();
     if (!dense)
       return failure();
     auto newAttr = DenseElementsAttr::get(extractStridedSliceOp.getType(),
@@ -2256,7 +2256,7 @@ class StridedSliceSplat final : public OpRewritePattern<ExtractStridedSliceOp> {
     auto splat = op.vector().getDefiningOp<SplatOp>();
     if (!splat)
       return failure();
-    rewriter.replaceOpWithNewOp<SplatOp>(op, op.getType(), splat.input());
+    rewriter.replaceOpWithNewOp<SplatOp>(op, op.getType(), splat.getInput());
     return success();
   }
 };
@@ -3652,7 +3652,7 @@ class ShapeCastConstantFolder final : public OpRewritePattern<ShapeCastOp> {
     if (!constantOp)
       return failure();
     // Only handle splat for now.
-    auto dense = constantOp.value().dyn_cast<SplatElementsAttr>();
+    auto dense = constantOp.getValue().dyn_cast<SplatElementsAttr>();
     if (!dense)
       return failure();
     auto newAttr = DenseElementsAttr::get(

diff  --git a/mlir/lib/Dialect/Vector/VectorUtils.cpp b/mlir/lib/Dialect/Vector/VectorUtils.cpp
index 2659a313c464b..e799c96fd6b09 100644
--- a/mlir/lib/Dialect/Vector/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/VectorUtils.cpp
@@ -325,14 +325,15 @@ bool mlir::isDisjointTransferIndices(VectorTransferOpInterface transferA,
     if (i < rankOffset) {
       // For leading dimensions, if we can prove that index are different we
       // know we are accessing disjoint slices.
-      if (indexA.value().cast<IntegerAttr>().getInt() !=
-          indexB.value().cast<IntegerAttr>().getInt())
+      if (indexA.getValue().cast<IntegerAttr>().getInt() !=
+          indexB.getValue().cast<IntegerAttr>().getInt())
         return true;
     } else {
       // For this dimension, we slice a part of the memref we need to make sure
       // the intervals accessed don't overlap.
-      int64_t distance = std::abs(indexA.value().cast<IntegerAttr>().getInt() -
-                                  indexB.value().cast<IntegerAttr>().getInt());
+      int64_t distance =
+          std::abs(indexA.getValue().cast<IntegerAttr>().getInt() -
+                   indexB.getValue().cast<IntegerAttr>().getInt());
       if (distance >= transferA.getVectorType().getDimSize(i - rankOffset))
         return true;
     }

diff  --git a/mlir/lib/TableGen/Operator.cpp b/mlir/lib/TableGen/Operator.cpp
index 69e678728775b..ad309ce82faab 100644
--- a/mlir/lib/TableGen/Operator.cpp
+++ b/mlir/lib/TableGen/Operator.cpp
@@ -655,8 +655,25 @@ getGetterOrSetterNames(bool isGetter, const Operator &op, StringRef name) {
   SmallVector<std::string, 2> names;
   bool rawToo = prefixType == Dialect::EmitPrefix::Both;
 
+  // Whether to skip generating the prefixed form for an argument. This only
+  // does some basic checks.
+  //
+  // Slightly more invasive checks are possible for cases where not all ops
+  // have the trait that would cause an overlap. For many cases here, renaming
+  // would be better (e.g., we can only guard in a limited manner against
+  // methods from traits and interfaces here, so avoiding these in op
+  // definitions is safer).
   auto skip = [&](StringRef newName) {
-    bool shouldSkip = newName == "getOperands";
+    bool shouldSkip = newName == "getAttributeNames" ||
+                      newName == "getAttributes" || newName == "getOperation" ||
+                      newName == "getType";
+    if (newName == "getOperands") {
+      // To reduce noise, skip generating the prefixed form and the warning if
+      // $operands corresponds to a single variadic argument.
+      if (op.getNumOperands() == 1 && op.getNumVariableLengthOperands() == 1)
+        return true;
+      shouldSkip = true;
+    }
     if (!shouldSkip)
       return false;
 
@@ -677,11 +694,11 @@ getGetterOrSetterNames(bool isGetter, const Operator &op, StringRef name) {
     if (skip(names.back())) {
       rawToo = true;
       names.clear();
-    } else {
+    } else if (rawToo) {
       LLVM_DEBUG(llvm::errs() << "WITH_GETTER(\"" << op.getQualCppClassName()
-                              << "::" << names.back() << "\");\n"
+                              << "::" << name << "\")\n"
                               << "WITH_GETTER(\"" << op.getQualCppClassName()
-                              << "Adaptor::" << names.back() << "\");\n";);
+                              << "Adaptor::" << name << "\")\n";);
     }
   }
 

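As a rough illustration of why the accessor flip handled above is largely non-breaking: under the Both prefix form, the generated op class keeps the legacy unprefixed accessor alongside the new get-prefixed one. The sketch below is a hand-written stand-in (ExampleAddOp and its members are invented for illustration and are not ODS output); it only shows the shape callers rely on while they are migrated.

// Hand-written stand-in for an op under EmitPrefix::Both: the prefixed
// accessor is canonical and the legacy spelling forwards to it, so both
// op.lhs() and op.getLhs() compile during the migration.
#include <iostream>
#include <string>
#include <utility>

class ExampleAddOp {
public:
  ExampleAddOp(std::string lhs, std::string rhs)
      : lhsValue(std::move(lhs)), rhsValue(std::move(rhs)) {}

  // New, prefixed accessors.
  const std::string &getLhs() const { return lhsValue; }
  const std::string &getRhs() const { return rhsValue; }

  // Legacy spellings retained by the Both form.
  const std::string &lhs() const { return getLhs(); }
  const std::string &rhs() const { return getRhs(); }

private:
  std::string lhsValue, rhsValue;
};

int main() {
  ExampleAddOp op("%a", "%b");
  std::cout << (op.lhs() == op.getLhs()) << "\n"; // prints 1: same value
  return 0;
}

Names such as getOperands, getAttributes, getOperation and getType are still skipped in the change above, since they would collide with methods inherited from Operation, traits and interfaces.
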
diff  --git a/mlir/lib/Target/Cpp/TranslateToCpp.cpp b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
index 0d0f501785f42..038ad85335cd0 100644
--- a/mlir/lib/Target/Cpp/TranslateToCpp.cpp
+++ b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
@@ -222,7 +222,7 @@ static LogicalResult printOperation(CppEmitter &emitter,
 static LogicalResult printOperation(CppEmitter &emitter,
                                     arith::ConstantOp constantOp) {
   Operation *operation = constantOp.getOperation();
-  Attribute value = constantOp.value();
+  Attribute value = constantOp.getValue();
 
   return printConstantOp(emitter, operation, value);
 }
@@ -230,7 +230,7 @@ static LogicalResult printOperation(CppEmitter &emitter,
 static LogicalResult printOperation(CppEmitter &emitter,
                                     mlir::ConstantOp constantOp) {
   Operation *operation = constantOp.getOperation();
-  Attribute value = constantOp.value();
+  Attribute value = constantOp.getValue();
 
   return printConstantOp(emitter, operation, value);
 }

diff  --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index ab37d60b6dbee..cd7865b6f87ae 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -339,10 +339,10 @@ GlobalOp Importer::processGlobal(llvm::GlobalVariable *GV) {
     b.create<ReturnOp>(op.getLoc(), ArrayRef<Value>({v}));
   }
   if (GV->hasAtLeastLocalUnnamedAddr())
-    op.unnamed_addrAttr(UnnamedAddrAttr::get(
+    op.setUnnamedAddrAttr(UnnamedAddrAttr::get(
         context, convertUnnamedAddrFromLLVM(GV->getUnnamedAddr())));
   if (GV->hasSection())
-    op.sectionAttr(b.getStringAttr(GV->getSection()));
+    op.setSectionAttr(b.getStringAttr(GV->getSection()));
 
   return globals[GV] = op;
 }

diff  --git a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
index 4c28a1f6c016d..f3ffc336eb738 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
@@ -304,8 +304,8 @@ convertOperationImpl(Operation &opInst, llvm::IRBuilderBase &builder,
     // TODO: refactor function type creation which usually occurs in std-LLVM
     // conversion.
     SmallVector<Type, 8> operandTypes;
-    operandTypes.reserve(inlineAsmOp.operands().size());
-    for (auto t : inlineAsmOp.operands().getTypes())
+    operandTypes.reserve(inlineAsmOp.getOperands().size());
+    for (auto t : inlineAsmOp.getOperands().getTypes())
       operandTypes.push_back(t);
 
     Type resultType;
@@ -330,7 +330,8 @@ convertOperationImpl(Operation &opInst, llvm::IRBuilderBase &builder,
                   inlineAsmOp.asm_string(), inlineAsmOp.constraints(),
                   inlineAsmOp.has_side_effects(), inlineAsmOp.is_align_stack());
     llvm::Value *result = builder.CreateCall(
-        inlineAsmInst, moduleTranslation.lookupValues(inlineAsmOp.operands()));
+        inlineAsmInst,
+        moduleTranslation.lookupValues(inlineAsmOp.getOperands()));
     if (opInst.getNumResults() != 0)
       moduleTranslation.mapValue(opInst.getResult(0), result);
     return success();
@@ -383,7 +384,7 @@ convertOperationImpl(Operation &opInst, llvm::IRBuilderBase &builder,
     return success();
   }
   if (auto condbrOp = dyn_cast<LLVM::CondBrOp>(opInst)) {
-    auto weights = condbrOp.branch_weights();
+    auto weights = condbrOp.getBranchWeights();
     llvm::MDNode *branchWeights = nullptr;
     if (weights) {
       // Map weight attributes to LLVM metadata.

diff  --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 94c20acd94fb5..12bdbad6b0255 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -366,8 +366,8 @@ static Value getPHISourceValue(Block *current, Block *pred,
     // For conditional branches, we take the operands from either the "true" or
     // the "false" branch.
     return condBranchOp.getSuccessor(0) == current
-               ? condBranchOp.trueDestOperands()[index]
-               : condBranchOp.falseDestOperands()[index];
+               ? condBranchOp.getTrueDestOperands()[index]
+               : condBranchOp.getFalseDestOperands()[index];
   }
 
   if (auto switchOp = dyn_cast<LLVM::SwitchOp>(terminator)) {
@@ -574,8 +574,8 @@ LogicalResult ModuleTranslation::convertGlobals() {
       }
     }
 
-    auto linkage = convertLinkageToLLVM(op.linkage());
-    auto addrSpace = op.addr_space();
+    auto linkage = convertLinkageToLLVM(op.getLinkage());
+    auto addrSpace = op.getAddrSpace();
 
     // LLVM IR requires constant with linkage other than external or weak
     // external to have initializers. If MLIR does not provide an initializer,
@@ -587,18 +587,18 @@ LogicalResult ModuleTranslation::convertGlobals() {
       cst = nullptr;
 
     auto *var = new llvm::GlobalVariable(
-        *llvmModule, type, op.constant(), linkage, cst, op.sym_name(),
+        *llvmModule, type, op.getConstant(), linkage, cst, op.getSymName(),
         /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, addrSpace);
 
-    if (op.unnamed_addr().hasValue())
-      var->setUnnamedAddr(convertUnnamedAddrToLLVM(*op.unnamed_addr()));
+    if (op.getUnnamedAddr().hasValue())
+      var->setUnnamedAddr(convertUnnamedAddrToLLVM(*op.getUnnamedAddr()));
 
-    if (op.section().hasValue())
-      var->setSection(*op.section());
+    if (op.getSection().hasValue())
+      var->setSection(*op.getSection());
 
-    addRuntimePreemptionSpecifier(op.dso_local(), var);
+    addRuntimePreemptionSpecifier(op.getDsoLocal(), var);
 
-    Optional<uint64_t> alignment = op.alignment();
+    Optional<uint64_t> alignment = op.getAlignment();
     if (alignment.hasValue())
       var->setAlignment(llvm::MaybeAlign(alignment.getValue()));
 
@@ -895,7 +895,7 @@ LogicalResult ModuleTranslation::createAliasScopeMetadata() {
       llvm::LLVMContext &ctx = llvmModule->getContext();
       llvm::SmallVector<llvm::Metadata *, 2> operands;
       operands.push_back({}); // Placeholder for self-reference
-      if (Optional<StringRef> description = op.description())
+      if (Optional<StringRef> description = op.getDescription())
         operands.push_back(llvm::MDString::get(ctx, description.getValue()));
       llvm::MDNode *domain = llvm::MDNode::get(ctx, operands);
       domain->replaceOperandWith(0, domain); // Self-reference for uniqueness
@@ -908,13 +908,13 @@ LogicalResult ModuleTranslation::createAliasScopeMetadata() {
       assert(isa<LLVM::MetadataOp>(op->getParentOp()));
       auto metadataOp = dyn_cast<LLVM::MetadataOp>(op->getParentOp());
       Operation *domainOp =
-          SymbolTable::lookupNearestSymbolFrom(metadataOp, op.domainAttr());
+          SymbolTable::lookupNearestSymbolFrom(metadataOp, op.getDomainAttr());
       llvm::MDNode *domain = aliasScopeDomainMetadataMapping.lookup(domainOp);
       assert(domain && "Scope's domain should already be valid");
       llvm::SmallVector<llvm::Metadata *, 3> operands;
       operands.push_back({}); // Placeholder for self-reference
       operands.push_back(domain);
-      if (Optional<StringRef> description = op.description())
+      if (Optional<StringRef> description = op.getDescription())
         operands.push_back(llvm::MDString::get(ctx, description.getValue()));
       llvm::MDNode *scope = llvm::MDNode::get(ctx, operands);
       scope->replaceOperandWith(0, scope); // Self-reference for uniqueness

diff  --git a/mlir/lib/Transforms/BufferResultsToOutParams.cpp b/mlir/lib/Transforms/BufferResultsToOutParams.cpp
index 73cc073b496b1..8ae443674ee9b 100644
--- a/mlir/lib/Transforms/BufferResultsToOutParams.cpp
+++ b/mlir/lib/Transforms/BufferResultsToOutParams.cpp
@@ -109,7 +109,7 @@ static LogicalResult updateCalls(ModuleOp module) {
     newOperands.append(outParams.begin(), outParams.end());
     auto newResultTypes = llvm::to_vector<6>(llvm::map_range(
         replaceWithNewCallResults, [](Value v) { return v.getType(); }));
-    auto newCall = builder.create<CallOp>(op.getLoc(), op.calleeAttr(),
+    auto newCall = builder.create<CallOp>(op.getLoc(), op.getCalleeAttr(),
                                           newResultTypes, newOperands);
     for (auto t : llvm::zip(replaceWithNewCallResults, newCall.getResults()))
       std::get<0>(t).replaceAllUsesWith(std::get<1>(t));

diff  --git a/mlir/lib/Transforms/NormalizeMemRefs.cpp b/mlir/lib/Transforms/NormalizeMemRefs.cpp
index 148592a2c689e..4a4deb5d3586f 100644
--- a/mlir/lib/Transforms/NormalizeMemRefs.cpp
+++ b/mlir/lib/Transforms/NormalizeMemRefs.cpp
@@ -129,7 +129,7 @@ void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
 
   // Functions called by this function.
   funcOp.walk([&](CallOp callOp) {
-    StringAttr callee = callOp.getCalleeAttr();
+    StringAttr callee = callOp.getCalleeAttr().getAttr();
     for (FuncOp &funcOp : normalizableFuncs) {
       // We compare FuncOp and callee's name.
       if (callee == funcOp.getNameAttr()) {


        

