[Mlir-commits] [mlir] 8df54a6 - [mlir] Update accessors to prefixed form (NFC)

Jacques Pienaar llvmlistbot at llvm.org
Sat Jun 18 17:53:29 PDT 2022


Author: Jacques Pienaar
Date: 2022-06-18T17:53:22-07:00
New Revision: 8df54a6a03a6d07e3053eff9806b450ec9193772

URL: https://github.com/llvm/llvm-project/commit/8df54a6a03a6d07e3053eff9806b450ec9193772
DIFF: https://github.com/llvm/llvm-project/commit/8df54a6a03a6d07e3053eff9806b450ec9193772.diff

LOG: [mlir] Update accessors to prefixed form (NFC)

Follow-up to flipping dialects to the _Both accessor-prefix mode: switch
the accessors used to the prefixed variant ahead of flipping the dialects
from _Both to _Prefixed. This just moves call sites to the accessors
introduced in the preceding change, which are merely prefixed forms of
the existing accessors they replace.

Mechanical change using helper script
https://github.com/jpienaar/llvm-project/blob/main/clang-tools-extra/clang-tidy/misc/AddGetterCheck.cpp and clang-format.
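
To illustrate the rename pattern (a sketch for exposition, not part of
the commit): for an operand declared in ODS as "memref", the generated
raw accessor memref() gains a prefixed counterpart getMemref(), and
snake_case names such as bypass_l1 map to getBypassL1(). Assuming some
op with such operands:

    // Before: raw accessors named directly after the ODS operand or
    // attribute name.
    Value m = op.memref();
    bool bypass = op.bypass_l1();

    // After: prefixed accessors, generated alongside the raw ones while
    // the dialect is in the _Both state; the raw forms go away once the
    // dialect flips to _Prefixed.
    Value m2 = op.getMemref();
    bool bypass2 = op.getBypassL1();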

Added: 
    

Modified: 
    flang/lib/Optimizer/Transforms/AffineDemotion.cpp
    mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
    mlir/lib/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp
    mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
    mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
    mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp
    mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
    mlir/lib/Dialect/AMX/IR/AMXDialect.cpp
    mlir/lib/Dialect/AMX/Transforms/LegalizeForLLVMExport.cpp
    mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
    mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
    mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
    mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp
    mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
    mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
    mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
    mlir/lib/Target/Cpp/TranslateToCpp.cpp

Removed: 
    


################################################################################
diff --git a/flang/lib/Optimizer/Transforms/AffineDemotion.cpp b/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
index 15d8a4ece69f8..96e0853b24b5e 100644
--- a/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
@@ -50,7 +50,7 @@ class AffineLoadConversion : public OpConversionPattern<mlir::AffineLoadOp> {
   LogicalResult
   matchAndRewrite(mlir::AffineLoadOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    SmallVector<Value> indices(adaptor.indices());
+    SmallVector<Value> indices(adaptor.getIndices());
     auto maybeExpandedMap =
         expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices);
     if (!maybeExpandedMap)
@@ -58,7 +58,7 @@ class AffineLoadConversion : public OpConversionPattern<mlir::AffineLoadOp> {
 
     auto coorOp = rewriter.create<fir::CoordinateOp>(
         op.getLoc(), fir::ReferenceType::get(op.getResult().getType()),
-        adaptor.memref(), *maybeExpandedMap);
+        adaptor.getMemref(), *maybeExpandedMap);
 
     rewriter.replaceOpWithNewOp<fir::LoadOp>(op, coorOp.getResult());
     return success();
@@ -72,7 +72,7 @@ class AffineStoreConversion : public OpConversionPattern<mlir::AffineStoreOp> {
   LogicalResult
   matchAndRewrite(mlir::AffineStoreOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    SmallVector<Value> indices(op.indices());
+    SmallVector<Value> indices(op.getIndices());
     auto maybeExpandedMap =
         expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices);
     if (!maybeExpandedMap)
@@ -80,8 +80,8 @@ class AffineStoreConversion : public OpConversionPattern<mlir::AffineStoreOp> {
 
     auto coorOp = rewriter.create<fir::CoordinateOp>(
         op.getLoc(), fir::ReferenceType::get(op.getValueToStore().getType()),
-        adaptor.memref(), *maybeExpandedMap);
-    rewriter.replaceOpWithNewOp<fir::StoreOp>(op, adaptor.value(),
+        adaptor.getMemref(), *maybeExpandedMap);
+    rewriter.replaceOpWithNewOp<fir::StoreOp>(op, adaptor.getValue(),
                                               coorOp.getResult());
     return success();
   }

diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
index b6f5c901adef7..b50b766d98b4b 100644
--- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
+++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
@@ -34,8 +34,8 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
   matchAndRewrite(GpuOp gpuOp, typename GpuOp::Adaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     Location loc = gpuOp.getLoc();
-    Value memref = adaptor.memref();
-    Value unconvertedMemref = gpuOp.memref();
+    Value memref = adaptor.getMemref();
+    Value unconvertedMemref = gpuOp.getMemref();
     MemRefType memrefType = unconvertedMemref.getType().cast<MemRefType>();
 
     Value storeData = adaptor.getODSOperands(0)[0];
@@ -163,9 +163,9 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
     // swizzles) RDNA only
     // bits 30-31: Type (must be 0)
     uint32_t word3 = (7 << 12) | (4 << 15);
-    if (adaptor.targetIsRDNA()) {
+    if (adaptor.getTargetIsRDNA()) {
       word3 |= (1 << 24);
-      uint32_t oob = adaptor.boundsCheck() ? 1 : 2;
+      uint32_t oob = adaptor.getBoundsCheck() ? 1 : 2;
       word3 |= (oob << 28);
     }
     Value word3Const = createI32Constant(rewriter, loc, word3);
@@ -176,7 +176,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
 
     // Indexing (voffset)
     Value voffset;
-    for (auto &pair : llvm::enumerate(adaptor.indices())) {
+    for (auto &pair : llvm::enumerate(adaptor.getIndices())) {
       size_t i = pair.index();
       Value index = pair.value();
       Value strideOp;
@@ -191,8 +191,8 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
       voffset =
           voffset ? rewriter.create<LLVM::AddOp>(loc, voffset, index) : index;
     }
-    if (adaptor.indexOffset().hasValue()) {
-      int32_t indexOffset = *gpuOp.indexOffset() * elementByteWidth;
+    if (adaptor.getIndexOffset().hasValue()) {
+      int32_t indexOffset = *gpuOp.getIndexOffset() * elementByteWidth;
       Value extraOffsetConst = createI32Constant(rewriter, loc, indexOffset);
       voffset =
           voffset ? rewriter.create<LLVM::AddOp>(loc, voffset, extraOffsetConst)
@@ -200,7 +200,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
     }
     args.push_back(voffset);
 
-    Value sgprOffset = adaptor.sgprOffset();
+    Value sgprOffset = adaptor.getSgprOffset();
     if (!sgprOffset)
       sgprOffset = createI32Constant(rewriter, loc, 0);
     if (ShapedType::isDynamicStrideOrOffset(offset))

diff --git a/mlir/lib/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp b/mlir/lib/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp
index e8c74c98319ff..a2f5641a0f5e2 100644
--- a/mlir/lib/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp
+++ b/mlir/lib/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp
@@ -28,19 +28,19 @@ class Sdot2dLoweringPattern : public OpRewritePattern<Sdot2dOp> {
   /// arm.neon.intr.sdot
   LogicalResult matchAndRewrite(Sdot2dOp op,
                                 PatternRewriter &rewriter) const override {
-    Type elemType = op.b().getType().cast<VectorType>().getElementType();
-    int length = op.b().getType().cast<VectorType>().getShape()[0] *
+    Type elemType = op.getB().getType().cast<VectorType>().getElementType();
+    int length = op.getB().getType().cast<VectorType>().getShape()[0] *
                  Sdot2dOp::kReductionSize;
     VectorType flattenedVectorType = VectorType::get({length}, elemType);
-    Value b2d = op.b();
-    Value c2d = op.c();
+    Value b2d = op.getB();
+    Value c2d = op.getC();
     Location loc = op.getLoc();
     Value b1d =
         rewriter.create<vector::ShapeCastOp>(loc, flattenedVectorType, b2d);
     Value c1d =
         rewriter.create<vector::ShapeCastOp>(loc, flattenedVectorType, c2d);
-    Value newOp =
-        rewriter.create<SdotOp>(loc, op.res().getType(), op.a(), b1d, c1d);
+    Value newOp = rewriter.create<SdotOp>(loc, op.getRes().getType(), op.getA(),
+                                          b1d, c1d);
     rewriter.replaceOp(op, {newOp});
     return success();
   }

diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index 41388a60eee2e..f980e52c3965b 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -221,14 +221,15 @@ struct MmaLdMatrixOpToNVVM : public ConvertOpToLLVMPattern<nvgpu::LdMatrixOp> {
       ldMatrixResultType = rewriter.getI32Type();
     }
 
-    auto srcMemrefType = op.srcMemref().getType().cast<MemRefType>();
-    Value srcPtr = getStridedElementPtr(loc, srcMemrefType, adaptor.srcMemref(),
-                                        adaptor.indices(), rewriter);
+    auto srcMemrefType = op.getSrcMemref().getType().cast<MemRefType>();
+    Value srcPtr =
+        getStridedElementPtr(loc, srcMemrefType, adaptor.getSrcMemref(),
+                             adaptor.getIndices(), rewriter);
     Value ldMatrixResult = rewriter.create<NVVM::LdMatrixOp>(
         loc, ldMatrixResultType, srcPtr,
-        /*num=*/op.numTiles(),
-        /*layout=*/op.transpose() ? NVVM::MMALayout::col
-                                  : NVVM::MMALayout::row);
+        /*num=*/op.getNumTiles(),
+        /*layout=*/op.getTranspose() ? NVVM::MMALayout::col
+                                     : NVVM::MMALayout::row);
 
     // The ldmatrix operation returns either a single i32 value or a struct of
     // i32 values. Here we unpack those values and cast them back to their
@@ -262,12 +263,12 @@ struct MmaSyncOptoNVVM : public ConvertOpToLLVMPattern<nvgpu::MmaSyncOp> {
     Location loc = op->getLoc();
     // Get the shapes of the MMAMatrix type being used. The shapes will
     // choose which intrinsic this op will be lowered to.
-    auto aType = op.matrixA().getType().cast<VectorType>();
-    auto cType = op.matrixC().getType().cast<VectorType>();
+    auto aType = op.getMatrixA().getType().cast<VectorType>();
+    auto cType = op.getMatrixC().getType().cast<VectorType>();
 
-    int64_t m = op.mmaShape()[0].cast<IntegerAttr>().getInt();
-    int64_t n = op.mmaShape()[1].cast<IntegerAttr>().getInt();
-    int64_t k = op.mmaShape()[2].cast<IntegerAttr>().getInt();
+    int64_t m = op.getMmaShape()[0].cast<IntegerAttr>().getInt();
+    int64_t n = op.getMmaShape()[1].cast<IntegerAttr>().getInt();
+    int64_t k = op.getMmaShape()[2].cast<IntegerAttr>().getInt();
     std::array<int64_t, 3> gemmShape{m, n, k};
 
     NVVM::MMATypes ptxTypeA;
@@ -302,11 +303,11 @@ struct MmaSyncOptoNVVM : public ConvertOpToLLVMPattern<nvgpu::MmaSyncOp> {
     }
 
     SmallVector<Value> matA =
-        unpackOperandVector(rewriter, loc, adaptor.matrixA(), ptxTypeA);
+        unpackOperandVector(rewriter, loc, adaptor.getMatrixA(), ptxTypeA);
     SmallVector<Value> matB =
-        unpackOperandVector(rewriter, loc, adaptor.matrixB(), ptxTypeB);
+        unpackOperandVector(rewriter, loc, adaptor.getMatrixB(), ptxTypeB);
     SmallVector<Value> matC =
-        unpackOperandVector(rewriter, loc, adaptor.matrixC(), *ptxTypeC);
+        unpackOperandVector(rewriter, loc, adaptor.getMatrixC(), *ptxTypeC);
 
     Type desiredRetTy = typeConverter->convertType(op->getResultTypes()[0]);
     Type intrinsicResTy = inferIntrinsicResultType(
@@ -359,18 +360,18 @@ struct NVGPUAsyncCopyLowering
   matchAndRewrite(nvgpu::DeviceAsyncCopyOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     Location loc = op->getLoc();
-    auto dstMemrefType = op.dst().getType().cast<MemRefType>();
-    Value dstPtr = getStridedElementPtr(loc, dstMemrefType, adaptor.dst(),
-                                        adaptor.dstIndices(), rewriter);
+    auto dstMemrefType = op.getDst().getType().cast<MemRefType>();
+    Value dstPtr = getStridedElementPtr(loc, dstMemrefType, adaptor.getDst(),
+                                        adaptor.getDstIndices(), rewriter);
     auto i8Ty = IntegerType::get(op.getContext(), 8);
     auto dstPointerType =
         LLVM::LLVMPointerType::get(i8Ty, dstMemrefType.getMemorySpaceAsInt());
     dstPtr = rewriter.create<LLVM::BitcastOp>(loc, dstPointerType, dstPtr);
 
-    auto srcMemrefType = op.src().getType().cast<MemRefType>();
+    auto srcMemrefType = op.getSrc().getType().cast<MemRefType>();
 
-    Value scrPtr = getStridedElementPtr(loc, srcMemrefType, adaptor.src(),
-                                        adaptor.srcIndices(), rewriter);
+    Value scrPtr = getStridedElementPtr(loc, srcMemrefType, adaptor.getSrc(),
+                                        adaptor.getSrcIndices(), rewriter);
     auto srcPointerType =
         LLVM::LLVMPointerType::get(i8Ty, srcMemrefType.getMemorySpaceAsInt());
     scrPtr = rewriter.create<LLVM::BitcastOp>(loc, srcPointerType, scrPtr);
@@ -379,12 +380,13 @@ struct NVGPUAsyncCopyLowering
         i8Ty, NVVM::NVVMMemorySpace::kGlobalMemorySpace);
     scrPtr = rewriter.create<LLVM::AddrSpaceCastOp>(loc, srcPointerGlobalType,
                                                     scrPtr);
-    int64_t numElements = adaptor.numElements().getZExtValue();
+    int64_t numElements = adaptor.getNumElements().getZExtValue();
     int64_t sizeInBytes =
         (dstMemrefType.getElementTypeBitWidth() * numElements) / 8;
     // bypass L1 is only supported for byte sizes of 16, we drop the hint
     // otherwise.
-    UnitAttr bypassL1 = sizeInBytes == 16 ? adaptor.bypassL1Attr() : UnitAttr();
+    UnitAttr bypassL1 =
+        sizeInBytes == 16 ? adaptor.getBypassL1Attr() : UnitAttr();
     rewriter.create<NVVM::CpAsyncOp>(
         loc, dstPtr, scrPtr, rewriter.getI32IntegerAttr(sizeInBytes), bypassL1);
 
@@ -424,7 +426,7 @@ struct NVGPUAsyncWaitLowering
   matchAndRewrite(nvgpu::DeviceAsyncWaitOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     // If numGroup is not present pick 0 as a conservative correct value.
-    int32_t numGroups = adaptor.numGroups() ? *adaptor.numGroups() : 0;
+    int32_t numGroups = adaptor.getNumGroups() ? *adaptor.getNumGroups() : 0;
     rewriter.create<NVVM::CpAsyncWaitGroupOp>(op.getLoc(), numGroups);
     rewriter.eraseOp(op);
     return success();

diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
index 6e74a90bc03d9..fc5bf877c9a91 100644
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
@@ -106,7 +106,7 @@ static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
 // rewriting infrastructure.
 static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
                                                      unsigned numDims) {
-  Region &limit = forOp.region();
+  Region &limit = forOp.getRegion();
   for (unsigned i = 0, e = numDims; i < e; ++i) {
     Operation *nested = &forOp.getBody()->front();
     if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
@@ -320,7 +320,7 @@ static Value deriveStaticUpperBound(Value upperBound,
   }
 
   if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
-    for (const AffineExpr &result : minOp.map().getResults()) {
+    for (const AffineExpr &result : minOp.getMap().getResults()) {
       if (auto constExpr = result.dyn_cast<AffineConstantExpr>()) {
         return rewriter.create<arith::ConstantIndexOp>(minOp.getLoc(),
                                                        constExpr.getValue());

diff --git a/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp b/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp
index 04795049c1d68..44ba0d0adaab4 100644
--- a/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp
+++ b/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp
@@ -45,7 +45,7 @@ class TensorExtractPattern final
   LogicalResult
   matchAndRewrite(tensor::ExtractOp extractOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    TensorType tensorType = extractOp.tensor().getType().cast<TensorType>();
+    TensorType tensorType = extractOp.getTensor().getType().cast<TensorType>();
 
     if (!tensorType.hasStaticShape())
       return rewriter.notifyMatchFailure(extractOp, "non-static tensor");
@@ -63,14 +63,14 @@ class TensorExtractPattern final
       strides[i] = strides[i + 1] * tensorType.getDimSize(i + 1);
     }
 
-    Type varType = spirv::PointerType::get(adaptor.tensor().getType(),
+    Type varType = spirv::PointerType::get(adaptor.getTensor().getType(),
                                            spirv::StorageClass::Function);
 
     spirv::VariableOp varOp;
-    if (adaptor.tensor().getDefiningOp<spirv::ConstantOp>()) {
+    if (adaptor.getTensor().getDefiningOp<spirv::ConstantOp>()) {
       varOp = rewriter.create<spirv::VariableOp>(
           loc, varType, spirv::StorageClass::Function,
-          /*initializer=*/adaptor.tensor());
+          /*initializer=*/adaptor.getTensor());
     } else {
       // Need to store the value to the local variable. It's questionable
       // whether we want to support such case though.
@@ -80,7 +80,7 @@ class TensorExtractPattern final
     auto &typeConverter = *getTypeConverter<SPIRVTypeConverter>();
     auto indexType = typeConverter.getIndexType();
 
-    Value index = spirv::linearizeIndex(adaptor.indices(), strides,
+    Value index = spirv::linearizeIndex(adaptor.getIndices(), strides,
                                         /*offset=*/0, indexType, loc, rewriter);
     auto acOp = rewriter.create<spirv::AccessChainOp>(loc, varOp, index);
 

diff --git a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
index 08dc1351c4d23..68639da21428b 100644
--- a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
+++ b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
@@ -31,14 +31,14 @@ void amdgpu::AMDGPUDialect::initialize() {
 //===----------------------------------------------------------------------===//
 template <typename T>
 static LogicalResult verifyRawBufferOp(T &op) {
-  MemRefType bufferType = op.memref().getType().template cast<MemRefType>();
+  MemRefType bufferType = op.getMemref().getType().template cast<MemRefType>();
   if (bufferType.getMemorySpaceAsInt() != 0)
     return op.emitOpError(
         "Buffer ops must operate on a memref in global memory");
   if (!bufferType.hasRank())
     return op.emitOpError(
         "Cannot meaningfully buffer_store to an unranked memref");
-  if (static_cast<int64_t>(op.indices().size()) != bufferType.getRank())
+  if (static_cast<int64_t>(op.getIndices().size()) != bufferType.getRank())
     return op.emitOpError("Expected " + Twine(bufferType.getRank()) +
                           " indices to memref");
   return success();

diff --git a/mlir/lib/Dialect/AMX/IR/AMXDialect.cpp b/mlir/lib/Dialect/AMX/IR/AMXDialect.cpp
index 9ea96791cef4b..f0e434407c8a2 100644
--- a/mlir/lib/Dialect/AMX/IR/AMXDialect.cpp
+++ b/mlir/lib/Dialect/AMX/IR/AMXDialect.cpp
@@ -58,14 +58,14 @@ LogicalResult amx::TileZeroOp::verify() {
 
 LogicalResult amx::TileLoadOp::verify() {
   unsigned rank = getMemRefType().getRank();
-  if (indices().size() != rank)
+  if (getIndices().size() != rank)
     return emitOpError("requires ") << rank << " indices";
   return verifyTileSize(*this, getVectorType());
 }
 
 LogicalResult amx::TileStoreOp::verify() {
   unsigned rank = getMemRefType().getRank();
-  if (indices().size() != rank)
+  if (getIndices().size() != rank)
     return emitOpError("requires ") << rank << " indices";
   return verifyTileSize(*this, getVectorType());
 }

diff --git a/mlir/lib/Dialect/AMX/Transforms/LegalizeForLLVMExport.cpp b/mlir/lib/Dialect/AMX/Transforms/LegalizeForLLVMExport.cpp
index e6949fa862e46..c19f8f182a923 100644
--- a/mlir/lib/Dialect/AMX/Transforms/LegalizeForLLVMExport.cpp
+++ b/mlir/lib/Dialect/AMX/Transforms/LegalizeForLLVMExport.cpp
@@ -112,10 +112,10 @@ struct TileLoadConversion : public ConvertOpToLLVMPattern<TileLoadOp> {
     if (failed(verifyStride(mType)))
       return failure();
     Value stride = getStride(rewriter, *getTypeConverter(), mType,
-                             adaptor.base(), op.getLoc());
+                             adaptor.getBase(), op.getLoc());
     // Replace operation with intrinsic.
-    Value ptr = getStridedElementPtr(op.getLoc(), mType, adaptor.base(),
-                                     adaptor.indices(), rewriter);
+    Value ptr = getStridedElementPtr(op.getLoc(), mType, adaptor.getBase(),
+                                     adaptor.getIndices(), rewriter);
     ptr = castPtr(rewriter, op.getLoc(), ptr);
     Type resType = typeConverter->convertType(vType);
     rewriter.replaceOpWithNewOp<amx::x86_amx_tileloadd64>(
@@ -139,13 +139,13 @@ struct TileStoreConversion : public ConvertOpToLLVMPattern<TileStoreOp> {
     if (failed(verifyStride(mType)))
       return failure();
     Value stride = getStride(rewriter, *getTypeConverter(), mType,
-                             adaptor.base(), op.getLoc());
+                             adaptor.getBase(), op.getLoc());
     // Replace operation with intrinsic.
-    Value ptr = getStridedElementPtr(op.getLoc(), mType, adaptor.base(),
-                                     adaptor.indices(), rewriter);
+    Value ptr = getStridedElementPtr(op.getLoc(), mType, adaptor.getBase(),
+                                     adaptor.getIndices(), rewriter);
     ptr = castPtr(rewriter, op.getLoc(), ptr);
     rewriter.replaceOpWithNewOp<amx::x86_amx_tilestored64>(
-        op, tsz.first, tsz.second, ptr, stride, adaptor.val());
+        op, tsz.first, tsz.second, ptr, stride, adaptor.getVal());
     return success();
   }
 };
@@ -166,8 +166,8 @@ struct TileMulFConversion : public ConvertOpToLLVMPattern<TileMulFOp> {
     // Replace operation with intrinsic.
     Type resType = typeConverter->convertType(cType);
     rewriter.replaceOpWithNewOp<amx::x86_amx_tdpbf16ps>(
-        op, resType, tsza.first, tszb.second, tsza.second, adaptor.acc(),
-        adaptor.lhs(), adaptor.rhs());
+        op, resType, tsza.first, tszb.second, tsza.second, adaptor.getAcc(),
+        adaptor.getLhs(), adaptor.getRhs());
     return success();
   }
 };
@@ -187,24 +187,24 @@ struct TileMulIConversion : public ConvertOpToLLVMPattern<TileMulIOp> {
         getTileSizes(rewriter, *getTypeConverter(), bType, op.getLoc());
     // Replace operation with intrinsic.
     Type resType = typeConverter->convertType(cType);
-    bool zexta = op.isZextLhs();
-    bool zextb = op.isZextRhs();
+    bool zexta = op.getIsZextLhs();
+    bool zextb = op.getIsZextRhs();
     if (zexta && zextb)
       rewriter.replaceOpWithNewOp<amx::x86_amx_tdpbuud>(
-          op, resType, tsza.first, tszb.second, tsza.second, adaptor.acc(),
-          adaptor.lhs(), adaptor.rhs());
+          op, resType, tsza.first, tszb.second, tsza.second, adaptor.getAcc(),
+          adaptor.getLhs(), adaptor.getRhs());
     else if (zexta && !zextb)
       rewriter.replaceOpWithNewOp<amx::x86_amx_tdpbusd>(
-          op, resType, tsza.first, tszb.second, tsza.second, adaptor.acc(),
-          adaptor.lhs(), adaptor.rhs());
+          op, resType, tsza.first, tszb.second, tsza.second, adaptor.getAcc(),
+          adaptor.getLhs(), adaptor.getRhs());
     else if (!zexta && zextb)
       rewriter.replaceOpWithNewOp<amx::x86_amx_tdpbsud>(
-          op, resType, tsza.first, tszb.second, tsza.second, adaptor.acc(),
-          adaptor.lhs(), adaptor.rhs());
+          op, resType, tsza.first, tszb.second, tsza.second, adaptor.getAcc(),
+          adaptor.getLhs(), adaptor.getRhs());
     else
       rewriter.replaceOpWithNewOp<amx::x86_amx_tdpbssd>(
-          op, resType, tsza.first, tszb.second, tsza.second, adaptor.acc(),
-          adaptor.lhs(), adaptor.rhs());
+          op, resType, tsza.first, tszb.second, tsza.second, adaptor.getAcc(),
+          adaptor.getLhs(), adaptor.getRhs());
     return success();
   }
 };

diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index d217f6bfe02c5..8883c4aaf6f93 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -65,9 +65,9 @@ ParseResult VoteBallotOp::parse(OpAsmParser &parser, OperationState &result) {
 void VoteBallotOp::print(OpAsmPrinter &p) { printNVVMIntrinsicOp(p, *this); }
 
 LogicalResult CpAsyncOp::verify() {
-  if (size() != 4 && size() != 8 && size() != 16)
+  if (getSize() != 4 && getSize() != 8 && getSize() != 16)
     return emitError("expected byte size to be either 4, 8 or 16.");
-  if (bypass_l1() && size() != 16)
+  if (getBypassL1() && getSize() != 16)
     return emitError("bypass l1 is only support for 16 bytes copy.");
   return success();
 }
@@ -140,8 +140,8 @@ void MmaOp::print(OpAsmPrinter &p) {
   };
 
   std::array<OperandFragment, 3> frags{
-      OperandFragment("A", multiplicandAPtxTypeAttrName()),
-      OperandFragment("B", multiplicandBPtxTypeAttrName()),
+      OperandFragment("A", getMultiplicandAPtxTypeAttrName()),
+      OperandFragment("B", getMultiplicandBPtxTypeAttrName()),
       OperandFragment("C", "")};
   SmallVector<StringRef, 4> ignoreAttrNames{
       mlir::NVVM::MmaOp::getOperandSegmentSizeAttr()};
@@ -184,7 +184,7 @@ void MmaOp::print(OpAsmPrinter &p) {
                                              frags[2].regs[0].getType()},
                         p);
   p << ")";
-  p.printArrowTypeList(TypeRange{this->res().getType()});
+  p.printArrowTypeList(TypeRange{this->getRes().getType()});
 }
 
 void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType,
@@ -355,8 +355,8 @@ LogicalResult MmaOp::verify() {
   auto s32x2StructTy =
       LLVM::LLVMStructType::getLiteral(context, {i32Ty, i32Ty});
 
-  std::array<int64_t, 3> mmaShape{shapeAttr().getM(), shapeAttr().getN(),
-                                  shapeAttr().getK()};
+  std::array<int64_t, 3> mmaShape{getShapeAttr().getM(), getShapeAttr().getN(),
+                                  getShapeAttr().getK()};
 
   // These variables define the set of allowed data types for matrices A, B, C,
   // and result.
@@ -373,7 +373,7 @@ LogicalResult MmaOp::verify() {
   if (mmaShape[0] == 16) {
     int64_t kFactor;
     Type multiplicandFragType;
-    switch (multiplicandAPtxType().getValue()) {
+    switch (getMultiplicandAPtxType().getValue()) {
     case MMATypes::tf32:
       kFactor = 4;
       multiplicandFragType = i32Ty;
@@ -400,10 +400,10 @@ LogicalResult MmaOp::verify() {
       break;
     default:
       return emitError("invalid shape or multiplicand type: " +
-                       stringifyEnum(multiplicandAPtxType().getValue()));
+                       stringifyEnum(getMultiplicandAPtxType().getValue()));
     }
 
-    if (isIntegerPtxType(multiplicandAPtxType().getValue())) {
+    if (isIntegerPtxType(getMultiplicandAPtxType().getValue())) {
       expectedResult.push_back(s32x4StructTy);
       expectedC.emplace_back(4, i32Ty);
       multiplicandFragType = i32Ty;
@@ -422,7 +422,7 @@ LogicalResult MmaOp::verify() {
 
   // In the M=8 case, there is only 1 possible case per data type.
   if (mmaShape[0] == 8) {
-    if (multiplicandAPtxType().getValue() == MMATypes::f16) {
+    if (getMultiplicandAPtxType().getValue() == MMATypes::f16) {
       expectedA.emplace_back(2, f16x2Ty);
       expectedB.emplace_back(2, f16x2Ty);
       expectedResult.push_back(f16x2x4StructTy);
@@ -431,7 +431,7 @@ LogicalResult MmaOp::verify() {
       expectedC.emplace_back(8, f32Ty);
       allowedShapes.push_back({8, 8, 4});
     }
-    if (multiplicandAPtxType().getValue() == MMATypes::f64) {
+    if (getMultiplicandAPtxType().getValue() == MMATypes::f64) {
       Type f64Ty = Float64Type::get(context);
       expectedA.emplace_back(1, f64Ty);
       expectedB.emplace_back(1, f64Ty);
@@ -441,16 +441,16 @@ LogicalResult MmaOp::verify() {
           context, SmallVector<Type>(2, f64Ty)));
       allowedShapes.push_back({8, 8, 4});
     }
-    if (isIntegerPtxType(multiplicandAPtxType().getValue())) {
+    if (isIntegerPtxType(getMultiplicandAPtxType().getValue())) {
       expectedA.push_back({i32Ty});
       expectedB.push_back({i32Ty});
       expectedC.push_back({i32Ty, i32Ty});
       expectedResult.push_back(s32x2StructTy);
-      if (isInt4PtxType(multiplicandAPtxType().getValue()))
+      if (isInt4PtxType(getMultiplicandAPtxType().getValue()))
         allowedShapes.push_back({8, 8, 32});
-      if (isInt8PtxType(multiplicandAPtxType().getValue()))
+      if (isInt8PtxType(getMultiplicandAPtxType().getValue()))
         allowedShapes.push_back({8, 8, 16});
-      if (multiplicandAPtxType().getValue() == MMATypes::b1)
+      if (getMultiplicandAPtxType().getValue() == MMATypes::b1)
         allowedShapes.push_back({8, 8, 128});
     }
   }
@@ -506,17 +506,19 @@ LogicalResult MmaOp::verify() {
   }
 
   // Ensure that binary MMA variants have a b1 MMA operation defined.
-  if (multiplicandAPtxType() == MMATypes::b1 && !b1Op().hasValue()) {
-    return emitOpError("op requires " + b1OpAttrName().strref() + " attribute");
+  if (getMultiplicandAPtxType() == MMATypes::b1 && !getB1Op().hasValue()) {
+    return emitOpError("op requires " + getB1OpAttrName().strref() +
+                       " attribute");
   }
 
   // Ensure int4/int8 MMA variants specify the accum overflow behavior
   // attribute.
-  if (isInt4PtxType(*multiplicandAPtxType()) ||
-      isInt8PtxType(*multiplicandAPtxType())) {
-    if (!intOverflowBehavior().hasValue())
+  if (isInt4PtxType(*getMultiplicandAPtxType()) ||
+      isInt8PtxType(*getMultiplicandAPtxType())) {
+    if (!getIntOverflowBehavior().hasValue())
       return emitOpError("op requires " +
-                         intOverflowBehaviorAttrName().strref() + " attribute");
+                         getIntOverflowBehaviorAttrName().strref() +
+                         " attribute");
   }
 
   return success();
@@ -561,16 +563,16 @@ std::pair<mlir::Type, unsigned> NVVM::inferMMAType(NVVM::MMATypes type,
 
 LogicalResult NVVM::WMMALoadOp::verify() {
   unsigned addressSpace =
-      ptr().getType().cast<LLVM::LLVMPointerType>().getAddressSpace();
+      getPtr().getType().cast<LLVM::LLVMPointerType>().getAddressSpace();
   if (addressSpace != 0 && addressSpace != 1 && addressSpace != 3)
     return emitOpError("expected source pointer in memory "
                        "space 0, 1, 3");
 
-  if (NVVM::WMMALoadOp::getIntrinsicID(m(), n(), k(), layout(), eltype(),
-                                       frag()) == 0)
+  if (NVVM::WMMALoadOp::getIntrinsicID(getM(), getN(), getK(), getLayout(),
+                                       getEltype(), getFrag()) == 0)
     return emitOpError() << "invalid attribute combination";
   std::pair<Type, unsigned> typeInfo =
-      inferMMAType(eltype(), frag(), getContext());
+      inferMMAType(getEltype(), getFrag(), getContext());
   Type dstType = LLVM::LLVMStructType::getLiteral(
       getContext(), SmallVector<Type, 8>(typeInfo.second, typeInfo.first));
   if (getType() != dstType)
@@ -581,18 +583,19 @@ LogicalResult NVVM::WMMALoadOp::verify() {
 
 LogicalResult NVVM::WMMAStoreOp::verify() {
   unsigned addressSpace =
-      ptr().getType().cast<LLVM::LLVMPointerType>().getAddressSpace();
+      getPtr().getType().cast<LLVM::LLVMPointerType>().getAddressSpace();
   if (addressSpace != 0 && addressSpace != 1 && addressSpace != 3)
     return emitOpError("expected operands to be a source pointer in memory "
                        "space 0, 1, 3");
 
-  if (NVVM::WMMAStoreOp::getIntrinsicID(m(), n(), k(), layout(), eltype()) == 0)
+  if (NVVM::WMMAStoreOp::getIntrinsicID(getM(), getN(), getK(), getLayout(),
+                                        getEltype()) == 0)
     return emitOpError() << "invalid attribute combination";
   std::pair<Type, unsigned> typeInfo =
-      inferMMAType(eltype(), NVVM::MMAFrag::c, getContext());
-  if (args().size() != typeInfo.second)
+      inferMMAType(getEltype(), NVVM::MMAFrag::c, getContext());
+  if (getArgs().size() != typeInfo.second)
     return emitOpError() << "expected " << typeInfo.second << " data operands";
-  if (llvm::any_of(args(), [&typeInfo](Value operands) {
+  if (llvm::any_of(getArgs(), [&typeInfo](Value operands) {
         return operands.getType() != typeInfo.first;
       }))
     return emitOpError() << "expected data operands of type " << typeInfo.first;
@@ -600,24 +603,25 @@ LogicalResult NVVM::WMMAStoreOp::verify() {
 }
 
 LogicalResult NVVM::WMMAMmaOp::verify() {
-  if (NVVM::WMMAMmaOp::getIntrinsicID(m(), n(), k(), layoutA(), layoutB(),
-                                      eltypeA(), eltypeB()) == 0)
+  if (NVVM::WMMAMmaOp::getIntrinsicID(getM(), getN(), getK(), getLayoutA(),
+                                      getLayoutB(), getEltypeA(),
+                                      getEltypeB()) == 0)
     return emitOpError() << "invalid attribute combination";
   std::pair<Type, unsigned> typeInfoA =
-      inferMMAType(eltypeA(), NVVM::MMAFrag::a, getContext());
+      inferMMAType(getEltypeA(), NVVM::MMAFrag::a, getContext());
   std::pair<Type, unsigned> typeInfoB =
-      inferMMAType(eltypeA(), NVVM::MMAFrag::b, getContext());
+      inferMMAType(getEltypeA(), NVVM::MMAFrag::b, getContext());
   std::pair<Type, unsigned> typeInfoC =
-      inferMMAType(eltypeB(), NVVM::MMAFrag::c, getContext());
+      inferMMAType(getEltypeB(), NVVM::MMAFrag::c, getContext());
   SmallVector<Type, 32> arguments;
   arguments.append(typeInfoA.second, typeInfoA.first);
   arguments.append(typeInfoB.second, typeInfoB.first);
   arguments.append(typeInfoC.second, typeInfoC.first);
   unsigned numArgs = arguments.size();
-  if (args().size() != numArgs)
+  if (getArgs().size() != numArgs)
     return emitOpError() << "expected " << numArgs << " arguments";
   for (unsigned i = 0; i < numArgs; i++) {
-    if (args()[i].getType() != arguments[i])
+    if (getArgs()[i].getType() != arguments[i])
       return emitOpError() << "expected argument " << i << " to be of type "
                            << arguments[i];
   }
@@ -631,22 +635,22 @@ LogicalResult NVVM::WMMAMmaOp::verify() {
 
 LogicalResult NVVM::LdMatrixOp::verify() {
   unsigned addressSpace =
-      ptr().getType().cast<LLVM::LLVMPointerType>().getAddressSpace();
+      getPtr().getType().cast<LLVM::LLVMPointerType>().getAddressSpace();
   if (addressSpace != 3)
     return emitOpError("expected source pointer in memory space 3");
 
-  if (num() != 1 && num() != 2 && num() != 4)
+  if (getNum() != 1 && getNum() != 2 && getNum() != 4)
     return emitOpError("expected num attribute to be 1, 2 or 4");
 
   Type i32 = IntegerType::get(getContext(), 32);
-  if (num() == 1 && getType() != i32)
+  if (getNum() == 1 && getType() != i32)
     return emitOpError("expected destination type is i32");
-  if (num() == 2 || num() == 4) {
+  if (getNum() == 2 || getNum() == 4) {
     Type dstType = LLVM::LLVMStructType::getLiteral(
-        getContext(), SmallVector<Type>(num(), i32));
+        getContext(), SmallVector<Type>(getNum(), i32));
     if (getType() != dstType)
       return emitOpError("expected destination type is a structure of ")
-             << num() << " elements of type i32";
+             << getNum() << " elements of type i32";
   }
   return success();
 }

diff --git a/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
index 87c574eb8f672..338e71517f4cc 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
@@ -81,7 +81,7 @@ ParseResult MubufStoreOp::parse(OpAsmParser &parser, OperationState &result) {
 }
 
 void MubufStoreOp::print(OpAsmPrinter &p) {
-  p << " " << getOperands() << " : " << vdata().getType();
+  p << " " << getOperands() << " : " << getVdata().getType();
 }
 
 // <operation> ::=
@@ -103,7 +103,7 @@ ParseResult RawBufferLoadOp::parse(OpAsmParser &parser,
 }
 
 void RawBufferLoadOp::print(OpAsmPrinter &p) {
-  p << " " << getOperands() << " : " << res().getType();
+  p << " " << getOperands() << " : " << getRes().getType();
 }
 
 // <operation> ::=
@@ -127,7 +127,7 @@ ParseResult RawBufferStoreOp::parse(OpAsmParser &parser,
 }
 
 void RawBufferStoreOp::print(OpAsmPrinter &p) {
-  p << " " << getOperands() << " : " << vdata().getType();
+  p << " " << getOperands() << " : " << getVdata().getType();
 }
 
 // <operation> ::=
@@ -151,7 +151,7 @@ ParseResult RawBufferAtomicFAddOp::parse(OpAsmParser &parser,
 }
 
 void RawBufferAtomicFAddOp::print(mlir::OpAsmPrinter &p) {
-  p << " " << getOperands() << " : " << vdata().getType();
+  p << " " << getOperands() << " : " << getVdata().getType();
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp b/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
index 1a781ef364ffe..c31a168cd2103 100644
--- a/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
+++ b/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
@@ -66,8 +66,8 @@ static bool isLastMemrefDimUnitStride(MemRefType type) {
 }
 
 LogicalResult DeviceAsyncCopyOp::verify() {
-  auto srcMemref = src().getType().cast<MemRefType>();
-  auto dstMemref = dst().getType().cast<MemRefType>();
+  auto srcMemref = getSrc().getType().cast<MemRefType>();
+  auto dstMemref = getDst().getType().cast<MemRefType>();
   unsigned workgroupAddressSpace = gpu::GPUDialect::getWorkgroupAddressSpace();
   if (!isLastMemrefDimUnitStride(srcMemref))
     return emitError("source memref most minor dim must have unit stride");
@@ -78,12 +78,13 @@ LogicalResult DeviceAsyncCopyOp::verify() {
            << workgroupAddressSpace;
   if (dstMemref.getElementType() != srcMemref.getElementType())
     return emitError("source and destination must have the same element type");
-  if (size_t(srcMemref.getRank()) != srcIndices().size())
+  if (size_t(srcMemref.getRank()) != getSrcIndices().size())
     return emitOpError() << "expected " << srcMemref.getRank()
-                         << " source indices, got " << srcIndices().size();
-  if (size_t(dstMemref.getRank()) != dstIndices().size())
+                         << " source indices, got " << getSrcIndices().size();
+  if (size_t(dstMemref.getRank()) != getDstIndices().size())
     return emitOpError() << "expected " << dstMemref.getRank()
-                         << " destination indices, got " << dstIndices().size();
+                         << " destination indices, got "
+                         << getDstIndices().size();
   return success();
 }
 

diff --git a/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp b/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp
index 3d01e2ee0998e..1760bde459488 100644
--- a/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp
+++ b/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp
@@ -101,9 +101,9 @@ static void transformIndices(OpBuilder &builder, Location loc,
 
 Operation::operand_range getIndices(Operation *op) {
   if (auto ldmatrixOp = dyn_cast<LdMatrixOp>(op))
-    return ldmatrixOp.indices();
+    return ldmatrixOp.getIndices();
   if (auto copyOp = dyn_cast<DeviceAsyncCopyOp>(op))
-    return copyOp.dstIndices();
+    return copyOp.getDstIndices();
   if (auto loadOp = dyn_cast<memref::LoadOp>(op))
     return loadOp.indices();
   if (auto storeOp = dyn_cast<memref::StoreOp>(op))
@@ -117,9 +117,9 @@ Operation::operand_range getIndices(Operation *op) {
 
 void setIndices(Operation *op, ArrayRef<Value> indices) {
   if (auto ldmatrixOp = dyn_cast<LdMatrixOp>(op))
-    return ldmatrixOp.indicesMutable().assign(indices);
+    return ldmatrixOp.getIndicesMutable().assign(indices);
   if (auto copyOp = dyn_cast<DeviceAsyncCopyOp>(op))
-    return copyOp.dstIndicesMutable().assign(indices);
+    return copyOp.getDstIndicesMutable().assign(indices);
   if (auto loadOp = dyn_cast<memref::LoadOp>(op))
     return loadOp.indicesMutable().assign(indices);
   if (auto storeOp = dyn_cast<memref::StoreOp>(op))

diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
index 4bff6b56e240c..71ca88dffdd9b 100644
--- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -1036,7 +1036,7 @@ static bool areEquivalentExtractSliceOps(const AnalysisState &state,
   if (!st || !sti)
     return false;
   if (st != sti &&
-      !state.areEquivalentBufferizedValues(st.source(), sti.getDest()))
+      !state.areEquivalentBufferizedValues(st.getSource(), sti.getDest()))
     return false;
   if (!sameOffsetsSizesAndStrides(st, sti, isEqualConstantIntOrValue))
     return false;

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 06168d5ef2c7f..a8deeaf8a9eeb 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -407,7 +407,7 @@ class SparseTensorToDimSizeConverter
   matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     // Only rewrite annotated DimOp with constant index.
-    auto enc = getSparseTensorEncoding(op.source().getType());
+    auto enc = getSparseTensorEncoding(op.getSource().getType());
     if (!enc)
       return failure();
     Optional<int64_t> index = op.getConstantIndex();
@@ -429,7 +429,7 @@ class SparseCastConverter : public OpConversionPattern<tensor::CastOp> {
                   ConversionPatternRewriter &rewriter) const override {
     // Only rewrite identically annotated source/dest.
     auto encDst = getSparseTensorEncoding(op.getType());
-    auto encSrc = getSparseTensorEncoding(op.source().getType());
+    auto encSrc = getSparseTensorEncoding(op.getSource().getType());
     if (!encDst || encDst != encSrc)
       return failure();
     rewriter.replaceOp(op, adaptor.getOperands());
@@ -511,7 +511,7 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
                   ConversionPatternRewriter &rewriter) const override {
     Location loc = op->getLoc();
     Type resType = op.getType();
-    Type srcType = op.source().getType();
+    Type srcType = op.getSource().getType();
     auto encDst = getSparseTensorEncoding(resType);
     auto encSrc = getSparseTensorEncoding(srcType);
     Value src = adaptor.getOperands()[0];
@@ -771,7 +771,7 @@ class SparseTensorLoadConverter : public OpConversionPattern<LoadOp> {
   LogicalResult
   matchAndRewrite(LoadOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    if (op.hasInserts()) {
+    if (op.getHasInserts()) {
       // Finalize any pending insertions.
       StringRef name = "endInsert";
       TypeRange noTp;
@@ -790,7 +790,7 @@ class SparseTensorLexInsertConverter : public OpConversionPattern<LexInsertOp> {
   LogicalResult
   matchAndRewrite(LexInsertOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Type elemTp = op.tensor().getType().cast<ShapedType>().getElementType();
+    Type elemTp = op.getTensor().getType().cast<ShapedType>().getElementType();
     SmallString<12> name{"lexInsert", primaryTypeFunctionSuffix(elemTp)};
     TypeRange noTp;
     replaceOpWithFuncCall(rewriter, op, name, noTp, adaptor.getOperands(),
@@ -806,12 +806,12 @@ class SparseTensorExpandConverter : public OpConversionPattern<ExpandOp> {
   matchAndRewrite(ExpandOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     Location loc = op->getLoc();
-    ShapedType srcType = op.tensor().getType().cast<ShapedType>();
+    ShapedType srcType = op.getTensor().getType().cast<ShapedType>();
     Type eltType = srcType.getElementType();
     Type boolType = rewriter.getIntegerType(1);
     Type idxType = rewriter.getIndexType();
     // All initialization should be done on entry of the loop nest.
-    rewriter.setInsertionPointAfter(op.tensor().getDefiningOp());
+    rewriter.setInsertionPointAfter(op.getTensor().getDefiningOp());
     // Determine the size for access expansion.
     auto enc = getSparseTensorEncoding(srcType);
     Value src = adaptor.getOperands()[0];
@@ -852,7 +852,7 @@ class SparseTensorCompressConverter : public OpConversionPattern<CompressOp> {
     // all-zero/false by only iterating over the set elements, so the
     // complexity remains proportional to the sparsity of the expanded
     // access pattern.
-    Type elemTp = op.tensor().getType().cast<ShapedType>().getElementType();
+    Type elemTp = op.getTensor().getType().cast<ShapedType>().getElementType();
     SmallString<12> name{"expInsert", primaryTypeFunctionSuffix(elemTp)};
     TypeRange noTp;
     replaceOpWithFuncCall(rewriter, op, name, noTp, adaptor.getOperands(),
@@ -880,7 +880,7 @@ class SparseTensorOutConverter : public OpConversionPattern<OutOp> {
   matchAndRewrite(OutOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     Location loc = op->getLoc();
-    ShapedType srcType = op.tensor().getType().cast<ShapedType>();
+    ShapedType srcType = op.getTensor().getType().cast<ShapedType>();
     // Convert to default permuted COO.
     Value src = adaptor.getOperands()[0];
     auto encSrc = getSparseTensorEncoding(srcType);

diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index 0e1617374ae31..f3056b9fd52bd 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -52,7 +52,7 @@ struct CastOpInterface
     auto castOp = cast<tensor::CastOp>(op);
 
     // The result buffer still has the old (pre-cast) type.
-    Value resultBuffer = getBuffer(rewriter, castOp.source(), options);
+    Value resultBuffer = getBuffer(rewriter, castOp.getSource(), options);
     auto sourceMemRefType = resultBuffer.getType().cast<BaseMemRefType>();
     Attribute memorySpace = sourceMemRefType.getMemorySpace();
     TensorType resultTensorType =
@@ -108,7 +108,7 @@ struct CollapseShapeOpInterface
                           const BufferizationOptions &options) const {
     auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
     RankedTensorType tensorResultType = collapseShapeOp.getResultType();
-    Value buffer = getBuffer(rewriter, collapseShapeOp.src(), options);
+    Value buffer = getBuffer(rewriter, collapseShapeOp.getSrc(), options);
     auto bufferType = buffer.getType().cast<MemRefType>();
 
     if (tensorResultType.getRank() == 0) {
@@ -135,7 +135,7 @@ struct CollapseShapeOpInterface
       }
 
       replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
-          rewriter, op, resultType, buffer, collapseShapeOp.reassociation());
+          rewriter, op, resultType, buffer, collapseShapeOp.getReassociation());
       return success();
     }
 
@@ -148,8 +148,8 @@ struct CollapseShapeOpInterface
       // TODO: Create alloc_tensor ops during TensorCopyInsertion.
       AnalysisState analysisState(options);
       Value tensorAlloc = allocateTensorForShapedValue(
-          rewriter, op->getLoc(), collapseShapeOp.src(),
-          analysisState.isTensorYielded(collapseShapeOp.result()));
+          rewriter, op->getLoc(), collapseShapeOp.getSrc(),
+          analysisState.isTensorYielded(collapseShapeOp.getResult()));
       auto memrefType =
           MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                           collapseShapeOp.getSrcType().getElementType(),
@@ -187,8 +187,9 @@ struct DimOpInterface
   LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                           const BufferizationOptions &options) const {
     auto dimOp = cast<tensor::DimOp>(op);
-    auto v = getBuffer(rewriter, dimOp.source(), options);
-    replaceOpWithNewBufferizedOp<memref::DimOp>(rewriter, op, v, dimOp.index());
+    auto v = getBuffer(rewriter, dimOp.getSource(), options);
+    replaceOpWithNewBufferizedOp<memref::DimOp>(rewriter, op, v,
+                                                dimOp.getIndex());
     return success();
   }
 };
@@ -223,7 +224,7 @@ struct ExpandShapeOpInterface
                           const BufferizationOptions &options) const {
     auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
     auto tensorResultType = expandShapeOp.getResultType();
-    auto buffer = getBuffer(rewriter, expandShapeOp.src(), options);
+    auto buffer = getBuffer(rewriter, expandShapeOp.getSrc(), options);
 
     // Memref result type is inferred by the builder based on reassociation
     // indices and result shape.
@@ -267,10 +268,10 @@ struct ExtractSliceOpInterface
 
     // Even if this op was decided to bufferize out-of-place, do not insert the
     // buffer copy yet. This is done later in this function.
-    auto srcMemref = getBuffer(rewriter, extractSliceOp.source(), options);
+    auto srcMemref = getBuffer(rewriter, extractSliceOp.getSource(), options);
     auto srcMemrefType = srcMemref.getType().cast<MemRefType>();
     auto dstTensorType =
-        extractSliceOp.result().getType().cast<RankedTensorType>();
+        extractSliceOp.getResult().getType().cast<RankedTensorType>();
 
     // Expand offsets, sizes and strides to the full rank to handle the
     // rank-reducing case.
@@ -321,9 +322,9 @@ struct ExtractOpInterface
   LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                           const BufferizationOptions &options) const {
     auto extractOp = cast<tensor::ExtractOp>(op);
-    Value srcMemref = getBuffer(rewriter, extractOp.tensor(), options);
+    Value srcMemref = getBuffer(rewriter, extractOp.getTensor(), options);
     replaceOpWithNewBufferizedOp<memref::LoadOp>(rewriter, op, srcMemref,
-                                                 extractOp.indices());
+                                                 extractOp.getIndices());
     return success();
   }
 };
@@ -365,8 +366,8 @@ struct FromElementsOpInterface
     // TODO: Create alloc_tensor ops during TensorCopyInsertion.
     AnalysisState analysisState(options);
     Value tensorAlloc = allocateTensorForShapedValue(
-        rewriter, loc, fromElementsOp.result(),
-        analysisState.isTensorYielded(fromElementsOp.result()),
+        rewriter, loc, fromElementsOp.getResult(),
+        analysisState.isTensorYielded(fromElementsOp.getResult()),
         /*copy=*/false);
     auto memrefType =
         MemRefType::get(tensorType.getShape(), tensorType.getElementType());
@@ -374,15 +375,15 @@ struct FromElementsOpInterface
         op->getLoc(), memrefType, tensorAlloc);
 
     // Case: tensor<0xelem_type>.
-    if (fromElementsOp.elements().empty()) {
+    if (fromElementsOp.getElements().empty()) {
       replaceOpWithBufferizedValues(rewriter, op, buffer);
       return success();
     }
 
     // Case: tensor<elem_type>.
     if (shape.empty()) {
-      rewriter.create<memref::StoreOp>(loc, fromElementsOp.elements().front(),
-                                       buffer);
+      rewriter.create<memref::StoreOp>(
+          loc, fromElementsOp.getElements().front(), buffer);
       replaceOpWithBufferizedValues(rewriter, op, buffer);
       return success();
     }
@@ -395,7 +396,7 @@ struct FromElementsOpInterface
       constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));
 
     // Traverse all `elements` and create `memref.store` ops.
-    auto elementIt = fromElementsOp.elements().begin();
+    auto elementIt = fromElementsOp.getElements().begin();
     SmallVector<Value, 2> indices(tensorType.getRank(), constants[0]);
     createStores(rewriter, loc, /*dim=*/0, buffer, shape, constants, elementIt,
                  indices);
@@ -418,8 +419,8 @@ struct GenerateOpInterface
     // TODO: Create alloc_tensor ops during TensorCopyInsertion.
     AnalysisState analysisState(options);
     Value tensorAlloc = allocateTensorForShapedValue(
-        rewriter, loc, generateOp.result(),
-        analysisState.isTensorYielded(generateOp.result()),
+        rewriter, loc, generateOp.getResult(),
+        analysisState.isTensorYielded(generateOp.getResult()),
         /*copy=*/false);
     auto memrefType =
         MemRefType::get(tensorType.getShape(), tensorType.getElementType());
@@ -435,10 +436,11 @@ struct GenerateOpInterface
     SmallVector<Value, 4> upperBounds;
     int nextDynamicIndex = 0;
     for (int i = 0; i < rank; i++) {
-      Value upperBound = memrefType.isDynamicDim(i)
-                             ? generateOp.dynamicExtents()[nextDynamicIndex++]
-                             : rewriter.create<arith::ConstantIndexOp>(
-                                   loc, memrefType.getDimSize(i));
+      Value upperBound =
+          memrefType.isDynamicDim(i)
+              ? generateOp.getDynamicExtents()[nextDynamicIndex++]
+              : rewriter.create<arith::ConstantIndexOp>(
+                    loc, memrefType.getDimSize(i));
       upperBounds.push_back(upperBound);
     }
 
@@ -495,9 +497,9 @@ struct InsertOpInterface
   LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                           const BufferizationOptions &options) const {
     auto insertOp = cast<tensor::InsertOp>(op);
-    Value destMemref = getBuffer(rewriter, insertOp.dest(), options);
-    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.scalar(),
-                                     destMemref, insertOp.indices());
+    Value destMemref = getBuffer(rewriter, insertOp.getDest(), options);
+    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
+                                     destMemref, insertOp.getIndices());
     replaceOpWithBufferizedValues(rewriter, op, destMemref);
     return success();
   }
@@ -519,7 +521,7 @@ static bool areEquivalentExtractSliceOps(const AnalysisState &state,
   if (!st || !sti)
     return false;
   if (sti != sti &&
-      !state.areEquivalentBufferizedValues(st.source(), sti.dest()))
+      !state.areEquivalentBufferizedValues(st.getSource(), sti.getDest()))
     return false;
   if (!sameOffsetsSizesAndStrides(st, sti, isEqualConstantIntOrValue))
     return false;
@@ -636,8 +638,8 @@ struct InsertSliceOpInterface
       // is no memory write here.)
       if (uConflictingWrite == &insertSliceOp->getOpOperand(1) /*dest*/ &&
           state.areEquivalentBufferizedValues(uRead->get(),
-                                              insertSliceOp.source()) &&
-          hasMatchingExtractSliceOp(state, insertSliceOp.source(),
+                                              insertSliceOp.getSource()) &&
+          hasMatchingExtractSliceOp(state, insertSliceOp.getSource(),
                                     insertSliceOp))
         return true;
 
@@ -653,7 +655,7 @@ struct InsertSliceOpInterface
     // TODO: be very loud about it or even consider failing the pass.
     auto insertSliceOp = cast<tensor::InsertSliceOp>(op);
     Location loc = insertSliceOp.getLoc();
-    Value dstMemref = getBuffer(rewriter, insertSliceOp.dest(), options);
+    Value dstMemref = getBuffer(rewriter, insertSliceOp.getDest(), options);
 
     // Expand offsets, sizes and strides to the full rank to handle the
     // rank-reducing case.
@@ -681,7 +683,7 @@ struct InsertSliceOpInterface
 
     // Copy tensor. If this tensor.insert_slice has a matching
     // tensor.extract_slice, the copy operation will eventually fold away.
-    auto srcMemref = getBuffer(rewriter, insertSliceOp.source(), options);
+    auto srcMemref = getBuffer(rewriter, insertSliceOp.getSource(), options);
     if (failed(options.createMemCpy(rewriter, loc, srcMemref, subView)))
       return failure();
 
@@ -712,7 +714,7 @@ struct RankOpInterface
   LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                           const BufferizationOptions &options) const {
     auto rankOp = cast<tensor::RankOp>(op);
-    auto v = getBuffer(rewriter, rankOp.tensor(), options);
+    auto v = getBuffer(rewriter, rankOp.getTensor(), options);
     replaceOpWithNewBufferizedOp<memref::RankOp>(rewriter, op, rankOp.getType(),
                                                  v);
     return success();
@@ -748,8 +750,8 @@ struct ReshapeOpInterface
   LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                           const BufferizationOptions &options) const {
     auto reshapeOp = cast<tensor::ReshapeOp>(op);
-    Value srcBuffer = getBuffer(rewriter, reshapeOp.source(), options);
-    Value shapeBuffer = getBuffer(rewriter, reshapeOp.shape(), options);
+    Value srcBuffer = getBuffer(rewriter, reshapeOp.getSource(), options);
+    Value shapeBuffer = getBuffer(rewriter, reshapeOp.getShape(), options);
     auto resultTensorType = reshapeOp.getResult().getType().cast<TensorType>();
     auto resultMemRefType = getMemRefType(resultTensorType, options);
     replaceOpWithNewBufferizedOp<memref::ReshapeOp>(

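The tensor bufferization hunks above follow the commit's single mechanical
rule: each raw ODS accessor foo() is replaced by its prefixed form getFoo(),
with snake_case argument names mapped to CamelCase. As a before/after sketch
of the rename (not a compilable unit; it reuses the tensor.insert bufferization
shown above, so only the accessor spelling differs):

  // Before the flip: raw accessors generated from the ODS argument names
  // ($dest, $scalar, $indices).
  Value destMemref = getBuffer(rewriter, insertOp.dest(), options);
  rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.scalar(),
                                   destMemref, insertOp.indices());

  // After the flip: prefixed accessors, same operands and semantics.
  Value destMemref2 = getBuffer(rewriter, insertOp.getDest(), options);
  rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
                                   destMemref2, insertOp.getIndices());
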
diff --git a/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp b/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
index 7b70e53a6e9c3..ac21f1714689d 100644
--- a/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
+++ b/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
@@ -29,13 +29,13 @@ void x86vector::X86VectorDialect::initialize() {
 }
 
 LogicalResult x86vector::MaskCompressOp::verify() {
-  if (src() && constant_src())
+  if (getSrc() && getConstantSrc())
     return emitError("cannot use both src and constant_src");
 
-  if (src() && (src().getType() != dst().getType()))
+  if (getSrc() && (getSrc().getType() != getDst().getType()))
     return emitError("failed to verify that src and dst have same type");
 
-  if (constant_src() && (constant_src()->getType() != dst().getType()))
+  if (getConstantSrc() && (getConstantSrc()->getType() != getDst().getType()))
     return emitError(
         "failed to verify that constant_src and dst have same type");
 

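The MaskCompressOp verifier above also shows the optional-operand /
optional-attribute idiom: getSrc() returns a null Value when the optional src
operand is absent, and getConstantSrc() is empty when the constant_src
attribute is missing, so both can be tested as booleans. A minimal sketch of
the same idiom on a hypothetical op (MyOp and its accessors getSrc,
getConstantSrc, and getDst are illustrative assumptions, not ops from this
commit):

  LogicalResult MyOp::verify() {
    // The optional operand and the optional attribute are mutually exclusive.
    if (getSrc() && getConstantSrc())
      return emitError("cannot use both src and constant_src");
    // When the optional operand is present, it must match the dst type.
    if (getSrc() && getSrc().getType() != getDst().getType())
      return emitError("src and dst must have the same type");
    return success();
  }
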
diff --git a/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp b/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
index bdd8d1fd31e60..4df05b6b1d0f9 100644
--- a/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
+++ b/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
@@ -22,11 +22,11 @@ using namespace mlir::x86vector;
 /// Extracts the "main" vector element type from the given X86Vector operation.
 template <typename OpTy>
 static Type getSrcVectorElementType(OpTy op) {
-  return op.src().getType().template cast<VectorType>().getElementType();
+  return op.getSrc().getType().template cast<VectorType>().getElementType();
 }
 template <>
 Type getSrcVectorElementType(Vp2IntersectOp op) {
-  return op.a().getType().template cast<VectorType>().getElementType();
+  return op.getA().getType().template cast<VectorType>().getElementType();
 }
 
 namespace {
@@ -70,21 +70,21 @@ struct MaskCompressOpConversion
   LogicalResult
   matchAndRewrite(MaskCompressOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto opType = adaptor.a().getType();
+    auto opType = adaptor.getA().getType();
 
     Value src;
-    if (op.src()) {
-      src = adaptor.src();
-    } else if (op.constant_src()) {
+    if (op.getSrc()) {
+      src = adaptor.getSrc();
+    } else if (op.getConstantSrc()) {
       src = rewriter.create<arith::ConstantOp>(op.getLoc(), opType,
-                                               op.constant_srcAttr());
+                                               op.getConstantSrcAttr());
     } else {
       Attribute zeroAttr = rewriter.getZeroAttr(opType);
       src = rewriter.create<arith::ConstantOp>(op->getLoc(), opType, zeroAttr);
     }
 
-    rewriter.replaceOpWithNewOp<MaskCompressIntrOp>(op, opType, adaptor.a(),
-                                                    src, adaptor.k());
+    rewriter.replaceOpWithNewOp<MaskCompressIntrOp>(op, opType, adaptor.getA(),
+                                                    src, adaptor.getK());
 
     return success();
   }
@@ -96,8 +96,8 @@ struct RsqrtOpConversion : public ConvertOpToLLVMPattern<RsqrtOp> {
   LogicalResult
   matchAndRewrite(RsqrtOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto opType = adaptor.a().getType();
-    rewriter.replaceOpWithNewOp<RsqrtIntrOp>(op, opType, adaptor.a());
+    auto opType = adaptor.getA().getType();
+    rewriter.replaceOpWithNewOp<RsqrtIntrOp>(op, opType, adaptor.getA());
     return success();
   }
 };
@@ -108,14 +108,14 @@ struct DotOpConversion : public ConvertOpToLLVMPattern<DotOp> {
   LogicalResult
   matchAndRewrite(DotOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto opType = adaptor.a().getType();
+    auto opType = adaptor.getA().getType();
     Type llvmIntType = IntegerType::get(&getTypeConverter()->getContext(), 8);
     // Dot product of all elements, broadcasted to all elements.
     auto attr = rewriter.getI8IntegerAttr(static_cast<int8_t>(0xff));
     Value scale =
         rewriter.create<LLVM::ConstantOp>(op.getLoc(), llvmIntType, attr);
-    rewriter.replaceOpWithNewOp<DotIntrOp>(op, opType, adaptor.a(), adaptor.b(),
-                                           scale);
+    rewriter.replaceOpWithNewOp<DotIntrOp>(op, opType, adaptor.getA(),
+                                           adaptor.getB(), scale);
     return success();
   }
 };

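In the conversion patterns above, the rename applies to both the op and its
OpAdaptor: adaptor.getA() returns the already type-converted operand, while
op.getSrc() still reads the original IR, which is why the MaskCompressOp
pattern mixes the two. A rough sketch of the shape of such a pattern, modeled
on the RsqrtOpConversion above (MyUnaryOp and MyUnaryIntrOp are hypothetical
placeholder ops, not part of this change):

  struct MyUnaryOpConversion : public ConvertOpToLLVMPattern<MyUnaryOp> {
    using ConvertOpToLLVMPattern<MyUnaryOp>::ConvertOpToLLVMPattern;

    LogicalResult
    matchAndRewrite(MyUnaryOp op, OpAdaptor adaptor,
                    ConversionPatternRewriter &rewriter) const override {
      // The adaptor exposes the same prefixed accessor names as the op,
      // but yields the operands after type conversion.
      rewriter.replaceOpWithNewOp<MyUnaryIntrOp>(op, adaptor.getA().getType(),
                                                 adaptor.getA());
      return success();
    }
  };
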
diff --git a/mlir/lib/Target/Cpp/TranslateToCpp.cpp b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
index 4eaa686e56d8f..f037b7d8febf7 100644
--- a/mlir/lib/Target/Cpp/TranslateToCpp.cpp
+++ b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
@@ -217,7 +217,7 @@ static LogicalResult printConstantOp(CppEmitter &emitter, Operation *operation,
 static LogicalResult printOperation(CppEmitter &emitter,
                                     emitc::ConstantOp constantOp) {
   Operation *operation = constantOp.getOperation();
-  Attribute value = constantOp.value();
+  Attribute value = constantOp.getValue();
 
   return printConstantOp(emitter, operation, value);
 }
@@ -225,7 +225,7 @@ static LogicalResult printOperation(CppEmitter &emitter,
 static LogicalResult printOperation(CppEmitter &emitter,
                                     emitc::VariableOp variableOp) {
   Operation *operation = variableOp.getOperation();
-  Attribute value = variableOp.value();
+  Attribute value = variableOp.getValue();
 
   return printConstantOp(emitter, operation, value);
 }
@@ -330,7 +330,7 @@ static LogicalResult printOperation(CppEmitter &emitter, emitc::CallOp callOp) {
 
   if (failed(emitter.emitAssignPrefix(op)))
     return failure();
-  os << callOp.callee();
+  os << callOp.getCallee();
 
   auto emitArgs = [&](Attribute attr) -> LogicalResult {
     if (auto t = attr.dyn_cast<IntegerAttr>()) {
@@ -352,9 +352,10 @@ static LogicalResult printOperation(CppEmitter &emitter, emitc::CallOp callOp) {
     return success();
   };
 
-  if (callOp.template_args()) {
+  if (callOp.getTemplateArgs()) {
     os << "<";
-    if (failed(interleaveCommaWithError(*callOp.template_args(), os, emitArgs)))
+    if (failed(
+            interleaveCommaWithError(*callOp.getTemplateArgs(), os, emitArgs)))
       return failure();
     os << ">";
   }
@@ -362,8 +363,9 @@ static LogicalResult printOperation(CppEmitter &emitter, emitc::CallOp callOp) {
   os << "(";
 
   LogicalResult emittedArgs =
-      callOp.args() ? interleaveCommaWithError(*callOp.args(), os, emitArgs)
-                    : emitter.emitOperands(op);
+      callOp.getArgs()
+          ? interleaveCommaWithError(*callOp.getArgs(), os, emitArgs)
+          : emitter.emitOperands(op);
   if (failed(emittedArgs))
     return failure();
   os << ")";
@@ -377,7 +379,7 @@ static LogicalResult printOperation(CppEmitter &emitter,
 
   if (failed(emitter.emitAssignPrefix(op)))
     return failure();
-  os << applyOp.applicableOperator();
+  os << applyOp.getApplicableOperator();
   os << emitter.getOrCreateName(applyOp.getOperand());
 
   return success();
@@ -403,10 +405,10 @@ static LogicalResult printOperation(CppEmitter &emitter,
   raw_ostream &os = emitter.ostream();
 
   os << "#include ";
-  if (includeOp.is_standard_include())
-    os << "<" << includeOp.include() << ">";
+  if (includeOp.getIsStandardInclude())
+    os << "<" << includeOp.getInclude() << ">";
   else
-    os << "\"" << includeOp.include() << "\"";
+    os << "\"" << includeOp.getInclude() << "\"";
 
   return success();
 }

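The emitter hunks spell out the naming rule applied throughout the commit: a
snake_case ODS argument name such as template_args or is_standard_include
becomes a CamelCase accessor with a get prefix. In summary (accessor names are
taken from the hunks above; the local variable names and types are only
indicative):

  Attribute value = constantOp.getValue();       // was: constantOp.value()
  auto callee = callOp.getCallee();              // was: callOp.callee()
  auto targs = callOp.getTemplateArgs();         // was: callOp.template_args()
  auto isStd = includeOp.getIsStandardInclude(); // was: includeOp.is_standard_include()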