[Mlir-commits] [mlir] d3b3f76 - [mlir] Flip to prefixed accessors (NFC)

Jacques Pienaar llvmlistbot at llvm.org
Sun Aug 7 04:56:04 PDT 2022


Author: Jacques Pienaar
Date: 2022-08-07T04:55:58-07:00
New Revision: d3b3f7653dd4d1013117429c2fb40b6f7273157a

URL: https://github.com/llvm/llvm-project/commit/d3b3f7653dd4d1013117429c2fb40b6f7273157a
DIFF: https://github.com/llvm/llvm-project/commit/d3b3f7653dd4d1013117429c2fb40b6f7273157a.diff

LOG: [mlir] Flip to prefixed accessors (NFC)

Added: 
    

Modified: 
    mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
    mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
    mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
    mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
    mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
    mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
    mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
    mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
    mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
    mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
    mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
    mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
    mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
    mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
    mlir/lib/Dialect/Linalg/Utils/Utils.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
    mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp

Removed: 
    

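For readers skimming the diff: this commit mechanically switches the touched Linalg, TosaToLinalg, and SparseTensor files from the ODS-generated "raw" accessor names to the prefixed get/set forms. Below is a minimal before/after sketch using accessor names taken from this diff; the wrapper functions and the `newMaps` value are hypothetical, for illustration only, and are not part of the commit:

    // Before the flip: raw accessors mirror the ODS operand/attribute names.
    void useRawAccessors(linalg::GenericOp genericOp, ArrayAttr newMaps) {
      Region &body = genericOp.region();
      ArrayAttr iterators = genericOp.iterator_types();
      genericOp.indexing_mapsAttr(newMaps); // raw attribute setter
    }

    // After the flip: accessors follow the usual getFoo/setFooAttr convention.
    void usePrefixedAccessors(linalg::GenericOp genericOp, ArrayAttr newMaps) {
      Region &body = genericOp.getRegion();
      ArrayAttr iterators = genericOp.getIteratorTypes();
      genericOp.setIndexingMapsAttr(newMaps); // prefixed attribute setter
    }

Note the pattern: snake_case getters map to prefixed camelCase (iterator_types() becomes getIteratorTypes()), and attribute setters gain a set prefix (indexing_mapsAttr(x) becomes setIndexingMapsAttr(x)).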

################################################################################
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index 05a18d7d68dc4..b1da7ee7c975e 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -810,7 +810,7 @@ static LogicalResult reduceMatchAndRewriteHelper(Operation *op, uint64_t axis,
   auto initTensor = rewriter
                         .create<linalg::InitTensorOp>(loc, dynDims, reduceShape,
                                                       resultTy.getElementType())
-                        .result();
+                        .getResult();
 
   auto fillValueAttr = createInitialValueForReduceOp(op, elementTy, rewriter);
   if (!fillValueAttr)
@@ -1375,7 +1375,7 @@ class ResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
     rewriter.replaceOp(op, genericOp.getResult(0));
 
     OpBuilder::InsertionGuard regionGuard(rewriter);
-    rewriter.createBlock(&genericOp.region(), genericOp.region().end(),
+    rewriter.createBlock(&genericOp.getRegion(), genericOp.getRegion().end(),
                          TypeRange({resultElementTy}), loc);
     Value batch = rewriter.create<linalg::IndexOp>(loc, 0);
     Value y = rewriter.create<linalg::IndexOp>(loc, 1);
@@ -1732,7 +1732,7 @@ class ReverseConverter : public OpRewritePattern<tosa::ReverseOp> {
                           .create<linalg::InitTensorOp>(
                               loc, ArrayRef<Value>({dynDims}),
                               inputTy.getShape(), inputTy.getElementType())
-                          .result();
+                          .getResult();
     SmallVector<AffineMap, 2> affineMaps = {
         rewriter.getMultiDimIdentityMap(resultTy.getRank())};
 
@@ -1951,7 +1951,7 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
         rewriter
             .create<linalg::InitTensorOp>(loc, dynDims, resultTy.getShape(),
                                           outElementTy)
-            .result();
+            .getResult();
     auto fillValueIdx = rewriter.create<arith::ConstantOp>(
         loc, rewriter.getIntegerAttr(outElementTy, 0));
     auto filledTensorIdx =
@@ -1964,7 +1964,7 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
     auto initTensorMax = rewriter
                              .create<linalg::InitTensorOp>(
                                  loc, dynDims, resultTy.getShape(), inElementTy)
-                             .result();
+                             .getResult();
     auto fillValueMaxAttr =
         createInitialValueForReduceOp(argmaxOp, inElementTy, rewriter);
 
@@ -2063,7 +2063,7 @@ class GatherConverter : public OpConversionPattern<tosa::GatherOp> {
         rewriter
             .create<linalg::InitTensorOp>(loc, dynamicDims, resultTy.getShape(),
                                           resultElementTy)
-            .result();
+            .getResult();
 
     SmallVector<AffineMap, 2> affineMaps = {
         AffineMap::get(
@@ -2123,7 +2123,7 @@ class TableConverter : public OpRewritePattern<tosa::TableOp> {
         rewriter
             .create<linalg::InitTensorOp>(loc, dynDims, resultTy.getShape(),
                                           resultElementTy)
-            .result();
+            .getResult();
 
     SmallVector<AffineMap, 2> affineMaps = {
         rewriter.getMultiDimIdentityMap(resultTy.getRank()),
@@ -2137,7 +2137,7 @@ class TableConverter : public OpRewritePattern<tosa::TableOp> {
     {
       OpBuilder::InsertionGuard regionGuard(rewriter);
       Block *block = rewriter.createBlock(
-          &genericOp.region(), genericOp.region().end(),
+          &genericOp.getRegion(), genericOp.getRegion().end(),
           TypeRange({inputElementTy, resultElementTy}), {loc, loc});
 
       auto inputValue = block->getArgument(0);

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index a55b29cad363d..d22151327faaf 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -756,7 +756,7 @@ void GenericOp::print(OpAsmPrinter &p) {
   }
 
   // Printing is shared with named ops, except for the region and attributes
-  printCommonStructuredOpParts(p, inputs(), outputs());
+  printCommonStructuredOpParts(p, getInputs(), getOutputs());
 
   genericAttrNames.push_back("operand_segment_sizes");
   genericAttrNamesSet.insert(genericAttrNames.back());
@@ -773,13 +773,13 @@ void GenericOp::print(OpAsmPrinter &p) {
   }
 
   // Print region.
-  if (!region().empty()) {
+  if (!getRegion().empty()) {
     p << ' ';
-    p.printRegion(region());
+    p.printRegion(getRegion());
   }
 
   // Print results.
-  printNamedStructuredOpResults(p, result_tensors().getTypes());
+  printNamedStructuredOpResults(p, getResultTensors().getTypes());
 }
 
 ParseResult GenericOp::parse(OpAsmParser &parser, OperationState &result) {
@@ -889,8 +889,8 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
     auto newOp = rewriter.create<GenericOp>(
         loc, newResultTypes, newInputOperands, newOutputOperands,
         rewriter.getAffineMapArrayAttr(newIndexingMaps),
-        genericOp.iterator_types(), genericOp.docAttr(),
-        genericOp.library_callAttr(),
+        genericOp.getIteratorTypes(), genericOp.getDocAttr(),
+        genericOp.getLibraryCallAttr(),
         [](OpBuilder & /*builder*/, Location /*loc*/, ValueRange /*args*/) {
           return;
         });
@@ -1045,9 +1045,9 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
       const llvm::SmallDenseMap<unsigned, unsigned> &origOutsToNewOutsPos,
       PatternRewriter &rewriter) const {
     // Merge the body of the original op with the new op.
-    Block *newOpBlock = &newOp.region().front();
+    Block *newOpBlock = &newOp.getRegion().front();
     assert(newOpBlock->empty() && "expected new op to have an empty payload");
-    Block *origOpBlock = &genericOp.region().front();
+    Block *origOpBlock = &genericOp.getRegion().front();
     SmallVector<Value> replacements(origOpBlock->getNumArguments(), nullptr);
 
     // Replace all arguments in the original op, with arguments from the
@@ -1084,7 +1084,7 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
 
       SmallVector<Value> newYieldVals(newOp.getNumOutputs(), nullptr);
       for (const auto &yieldOpOperands :
-           llvm::enumerate(origYieldOp.values())) {
+           llvm::enumerate(origYieldOp.getValues())) {
         auto it = origOutsToNewOutsPos.find(yieldOpOperands.index());
         if (it == origOutsToNewOutsPos.end())
           continue;
@@ -1112,7 +1112,7 @@ struct EraseIdentityGenericOp : public OpRewritePattern<GenericOp> {
 
     // Check that the body of the linalg operation is just a linalg.yield
     // operation.
-    Block &body = genericOp.region().front();
+    Block &body = genericOp.getRegion().front();
     if (!llvm::hasSingleElement(body))
       return failure();
     auto yieldOp = dyn_cast<linalg::YieldOp>(body.getTerminator());
@@ -1133,7 +1133,7 @@ struct EraseIdentityGenericOp : public OpRewritePattern<GenericOp> {
     // Get the argument number of the returned values. That is the operand
     // number to use for replacing uses of this operation.
     SmallVector<Value> returnedArgs;
-    for (const auto &yieldVal : llvm::enumerate(yieldOp.values())) {
+    for (const auto &yieldVal : llvm::enumerate(yieldOp.getValues())) {
       auto yieldArg = yieldVal.value().dyn_cast<BlockArgument>();
       if (!yieldArg || yieldArg.getOwner() != &body)
         return failure();
@@ -1200,15 +1200,15 @@ void InitTensorOp::build(OpBuilder &b, OperationState &result,
 LogicalResult InitTensorOp::verify() {
   RankedTensorType resultType = getType();
   SmallVector<int64_t, 4> staticSizes = llvm::to_vector<4>(llvm::map_range(
-      static_sizes().cast<ArrayAttr>(),
+      getStaticSizes().cast<ArrayAttr>(),
       [](Attribute a) -> int64_t { return a.cast<IntegerAttr>().getInt(); }));
 
   if (failed(verifyListOfOperandsOrIntegers(
-          *this, "sizes", resultType.getRank(), static_sizes(), sizes(),
+          *this, "sizes", resultType.getRank(), getStaticSizes(), getSizes(),
           ShapedType::isDynamic)))
     return failure();
 
-  if (static_sizes().size() != static_cast<unsigned>(resultType.getRank()))
+  if (getStaticSizes().size() != static_cast<unsigned>(resultType.getRank()))
     return emitError("expected ") << resultType.getRank() << " sizes values";
 
   Type expectedType = InitTensorOp::inferResultType(
@@ -1230,13 +1230,13 @@ SmallVector<OpFoldResult> InitTensorOp::getMixedSizes() {
   SmallVector<OpFoldResult> mixedSizes;
   mixedSizes.reserve(getType().getRank());
   unsigned dynamicValIndex = 0;
-  for (Attribute attr : static_sizes()) {
+  for (Attribute attr : getStaticSizes()) {
     auto intAttr = attr.cast<IntegerAttr>();
     if (!ShapedType::isDynamic(intAttr.getInt())) {
       mixedSizes.push_back(intAttr);
       continue;
     }
-    mixedSizes.push_back(sizes()[dynamicValIndex++]);
+    mixedSizes.push_back(getSizes()[dynamicValIndex++]);
   }
   return mixedSizes;
 }
@@ -1524,9 +1524,9 @@ LogicalResult IndexOp::verify() {
   auto linalgOp = dyn_cast<LinalgOp>((*this)->getParentOp());
   if (!linalgOp)
     return emitOpError("expected parent op with LinalgOp interface");
-  if (linalgOp.getNumLoops() <= dim())
+  if (linalgOp.getNumLoops() <= getDim())
     return emitOpError("expected dim (")
-           << dim() << ") to be lower than the number of loops ("
+           << getDim() << ") to be lower than the number of loops ("
            << linalgOp.getNumLoops() << ") of the enclosing LinalgOp";
   return success();
 }

diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
index de21f11ba5364..c3669724ba8e5 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
@@ -273,7 +273,7 @@ struct FoldConstantTranspose : public FoldConstantBase<FoldConstantTranspose> {
 
   RegionComputationFn getRegionComputeFn(GenericOp genericOp) const {
     // Make sure the region only contains a yield op.
-    Block &body = genericOp.region().front();
+    Block &body = genericOp.getRegion().front();
     if (!llvm::hasSingleElement(body))
       return nullptr;
     auto yieldOp = dyn_cast<linalg::YieldOp>(body.getTerminator());
@@ -281,7 +281,7 @@ struct FoldConstantTranspose : public FoldConstantBase<FoldConstantTranspose> {
       return nullptr;
 
     // The yield op should return the block argument corresponding to the input.
-    for (Value yieldVal : yieldOp.values()) {
+    for (Value yieldVal : yieldOp.getValues()) {
       auto yieldArg = yieldVal.dyn_cast<BlockArgument>();
       if (!yieldArg || yieldArg.getOwner() != &body)
         return nullptr;

diff --git a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
index 662ab86d0dc5a..b69f2db648f47 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
@@ -201,8 +201,8 @@ DecomposeLinalgOp::createPeeledGenericOp(GenericOp genericOp,
   auto indexingMapAttr =
       rewriter.getAffineMapArrayAttr(peeledGenericOpIndexingMaps);
   return rewriter.create<GenericOp>(
-      loc, resultTypes, genericOp.inputs(), outsOperands, indexingMapAttr,
-      genericOp.iterator_types(), /*doc=*/nullptr, /*libraryCall=*/nullptr,
+      loc, resultTypes, genericOp.getInputs(), outsOperands, indexingMapAttr,
+      genericOp.getIteratorTypes(), /*doc=*/nullptr, /*libraryCall=*/nullptr,
       [](OpBuilder, Location, ValueRange) {});
 }
 
@@ -240,8 +240,8 @@ DecomposeLinalgOp::createResidualGenericOp(GenericOp genericOp,
   auto indexingMapAttr = rewriter.getAffineMapArrayAttr(indexingMaps);
   return rewriter.create<GenericOp>(
       genericOp->getLoc(), genericOp->getResultTypes(),
-      residualGenericOpOperands, genericOp.outputs(), indexingMapAttr,
-      genericOp.iterator_types(), /*doc=*/nullptr, /*libraryCall=*/nullptr,
+      residualGenericOpOperands, genericOp.getOutputs(), indexingMapAttr,
+      genericOp.getIteratorTypes(), /*doc=*/nullptr, /*libraryCall=*/nullptr,
       [](OpBuilder, Location, ValueRange) {});
 }
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
index 9a31622b52f0b..b98b486d38ee3 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
@@ -65,13 +65,13 @@ class DetensorizeGenericOp : public OpConversionPattern<GenericOp> {
     Block *originalBlock = op->getBlock();
 
     // Gather some information about the op before inlining its region.
-    Block *opEntryBlock = &*op.region().begin();
-    YieldOp yieldOp = dyn_cast<YieldOp>(op.region().back().getTerminator());
+    Block *opEntryBlock = &*op.getRegion().begin();
+    YieldOp yieldOp = dyn_cast<YieldOp>(op.getRegion().back().getTerminator());
 
     // Split the op's region before the op. This way, we have a clear insertion
     // point in which the op can be inlined.
     Block *newBlock = rewriter.splitBlock(originalBlock, Block::iterator(op));
-    rewriter.inlineRegionBefore(op.region(), newBlock);
+    rewriter.inlineRegionBefore(op.getRegion(), newBlock);
     // Now that op's region is inlined, the operands of its YieldOp are mapped
     // to the materialized target values. Therefore, we can replace the op's
     // uses with those of its YieldOp's operands.
@@ -379,7 +379,7 @@ struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
           }
 
           opsToDetensor.insert(genericOp);
-          llvm::append_range(workList, genericOp.inputs());
+          llvm::append_range(workList, genericOp.getInputs());
           continue;
         }
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index 21624b70d5e2d..df3b8e6af0437 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -152,15 +152,15 @@ static void replaceUnitDimIndexOps(GenericOp genericOp,
        llvm::make_early_inc_range(genericOp.getBody()->getOps<IndexOp>())) {
     OpBuilder::InsertionGuard guard(rewriter);
     rewriter.setInsertionPoint(indexOp);
-    if (unitDims.count(indexOp.dim()) != 0) {
+    if (unitDims.count(indexOp.getDim()) != 0) {
       rewriter.replaceOpWithNewOp<arith::ConstantIndexOp>(indexOp, 0);
     } else {
       // Update the dimension of the index operation if needed.
       unsigned droppedDims = llvm::count_if(
-          unitDims, [&](unsigned dim) { return dim < indexOp.dim(); });
+          unitDims, [&](unsigned dim) { return dim < indexOp.getDim(); });
       if (droppedDims != 0)
         rewriter.replaceOpWithNewOp<IndexOp>(indexOp,
-                                             indexOp.dim() - droppedDims);
+                                             indexOp.getDim() - droppedDims);
     }
   }
 }
@@ -185,7 +185,7 @@ struct FoldUnitDimLoops : public OpRewritePattern<GenericOp> {
 
     DenseSet<unsigned> unitDims;
     SmallVector<unsigned, 4> unitDimsReductionLoops;
-    ArrayAttr iteratorTypes = genericOp.iterator_types();
+    ArrayAttr iteratorTypes = genericOp.getIteratorTypes();
     for (const auto &expr : enumerate(invertedMap.getResults())) {
       if (AffineDimExpr dimExpr = expr.value().dyn_cast<AffineDimExpr>())
         if (dims[dimExpr.getPosition()] == 1)
@@ -211,8 +211,8 @@ struct FoldUnitDimLoops : public OpRewritePattern<GenericOp> {
     }
 
     rewriter.startRootUpdate(genericOp);
-    genericOp.indexing_mapsAttr(newIndexingMapAttr);
-    genericOp.iterator_typesAttr(ArrayAttr::get(context, newIteratorTypes));
+    genericOp.setIndexingMapsAttr(newIndexingMapAttr);
+    genericOp.setIteratorTypesAttr(ArrayAttr::get(context, newIteratorTypes));
     replaceUnitDimIndexOps(genericOp, unitDims, rewriter);
     rewriter.finalizeRootUpdate(genericOp);
     return success();
@@ -420,8 +420,8 @@ struct ReplaceUnitExtents : public OpRewritePattern<GenericOp> {
       return res;
     };
 
-    SmallVector<Value, 4> newInputs = insertReshapes(genericOp.inputs());
-    SmallVector<Value, 4> newOutputs = insertReshapes(genericOp.outputs());
+    SmallVector<Value, 4> newInputs = insertReshapes(genericOp.getInputs());
+    SmallVector<Value, 4> newOutputs = insertReshapes(genericOp.getOutputs());
 
     // If any result type changes, insert a reshape to convert from the original
     // type to the new type.
@@ -431,10 +431,11 @@ struct ReplaceUnitExtents : public OpRewritePattern<GenericOp> {
       resultTypes.push_back(newInputOutputTypes[i + genericOp.getNumInputs()]);
     GenericOp replacementOp = rewriter.create<GenericOp>(
         loc, resultTypes, newInputs, newOutputs, newIndexingMaps,
-        llvm::to_vector<4>(
-            genericOp.iterator_types().template getAsValueRange<StringAttr>()));
-    rewriter.inlineRegionBefore(genericOp.region(), replacementOp.region(),
-                                replacementOp.region().begin());
+        llvm::to_vector<4>(genericOp.getIteratorTypes()
+                               .template getAsValueRange<StringAttr>()));
+    rewriter.inlineRegionBefore(genericOp.getRegion(),
+                                replacementOp.getRegion(),
+                                replacementOp.getRegion().begin());
 
     // If any result tensor has a modified shape, then add reshape to recover
     // the original shape.

diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 34305a0d3887f..d19f926da8660 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -146,7 +146,7 @@ generateFusedElementwiseOpRegion(PatternRewriter &rewriter, GenericOp fusedOp,
   Block &producerBlock = producer->getRegion(0).front();
   Block &consumerBlock = consumer->getRegion(0).front();
   Block *fusedBlock = new Block();
-  fusedOp.region().push_back(fusedBlock);
+  fusedOp.getRegion().push_back(fusedBlock);
   BlockAndValueMapping mapper;
   OpBuilder::InsertionGuard guard(rewriter);
   rewriter.setInsertionPointToStart(fusedBlock);
@@ -167,7 +167,7 @@ generateFusedElementwiseOpRegion(PatternRewriter &rewriter, GenericOp fusedOp,
          llvm::make_early_inc_range(producerBlock.getOps<IndexOp>())) {
       Value newIndex = rewriter.create<mlir::AffineApplyOp>(
           producer.getLoc(),
-          consumerToProducerLoopsMap.getSubMap(indexOp.dim()), fusedIndices);
+          consumerToProducerLoopsMap.getSubMap(indexOp.getDim()), fusedIndices);
       mapper.map(indexOp.getResult(), newIndex);
     }
   }
@@ -320,7 +320,7 @@ fuseElementwiseOpsImpl(GenericOp producer, OpOperand *consumerOpOperand,
       /*inputs=*/fusedOperands,
       // TODO: handle outputs.
       consumerOutputs, rewriter.getAffineMapArrayAttr(fusedIndexMaps),
-      consumer.iterator_types(),
+      consumer.getIteratorTypes(),
       /*doc=*/nullptr,
       /*library_call=*/nullptr);
   if (!fusedOp.getShapesToLoopsMap()) {
@@ -458,14 +458,14 @@ static bool isFusableWithReshapeByDimExpansion(GenericOp genericOp,
   // - The fused tensor is not a scalar.
   // - All the loops are parallel loops.
   return genericOp.hasTensorSemantics() &&
-         llvm::all_of(genericOp.indexing_maps().getValue(),
+         llvm::all_of(genericOp.getIndexingMaps().getValue(),
                       [](Attribute attr) {
                         return attr.cast<AffineMapAttr>()
                             .getValue()
                             .isProjectedPermutation();
                       }) &&
          genericOp.getTiedIndexingMap(fusableOpOperand).getNumResults() > 0 &&
-         llvm::all_of(genericOp.iterator_types(), [](Attribute attr) {
+         llvm::all_of(genericOp.getIteratorTypes(), [](Attribute attr) {
            return attr.cast<StringAttr>().getValue() ==
                   getParallelIteratorTypeName();
          });
@@ -646,19 +646,19 @@ static void updateExpandedGenericOpRegion(PatternRewriter &rewriter,
   for (IndexOp indexOp :
        llvm::make_early_inc_range(fusedRegion.front().getOps<IndexOp>())) {
     ArrayRef<int64_t> expandedDims =
-        expansionInfo.getExpandedDims(indexOp.dim());
+        expansionInfo.getExpandedDims(indexOp.getDim());
     assert(!expandedDims.empty() && "expected valid expansion info");
 
     // Skip index operations that are not affected by the expansion.
     if (expandedDims.size() == 1 &&
-        expandedDims.front() == (int64_t)indexOp.dim())
+        expandedDims.front() == (int64_t)indexOp.getDim())
       continue;
 
     // Linearize the expanded indices of the original index dimension.
     OpBuilder::InsertionGuard guard(rewriter);
     rewriter.setInsertionPointAfter(indexOp);
     ArrayRef<int64_t> expandedDimsShape =
-        expansionInfo.getExpandedShapeOfDim(indexOp.dim()).drop_front();
+        expansionInfo.getExpandedShapeOfDim(indexOp.getDim()).drop_front();
     SmallVector<Value> expandedIndices;
     expandedIndices.reserve(expandedDims.size() - 1);
     llvm::transform(
@@ -1016,7 +1016,8 @@ getCollapsableIterationSpaceDims(GenericOp genericOp, OpOperand *fusableOperand,
 
   // Compute all the loops with the reduction iterator types.
   SmallVector<int64_t> reductionDims;
-  for (const auto &iteratorType : llvm::enumerate(genericOp.iterator_types())) {
+  for (const auto &iteratorType :
+       llvm::enumerate(genericOp.getIteratorTypes())) {
     if (isReductionIterator(iteratorType.value())) {
       reductionDims.push_back(iteratorType.index());
     }
@@ -1024,7 +1025,7 @@ getCollapsableIterationSpaceDims(GenericOp genericOp, OpOperand *fusableOperand,
 
   llvm::SmallDenseSet<unsigned, 4> processedIterationDims;
   AffineMap indexingMap = genericOp.getTiedIndexingMap(fusableOperand);
-  auto iteratorTypes = genericOp.iterator_types().getValue();
+  auto iteratorTypes = genericOp.getIteratorTypes().getValue();
   SmallVector<ReassociationIndices> iterationSpaceReassociation;
   for (ReassociationIndicesRef foldedRangeDims : reassociation) {
     assert(!foldedRangeDims.empty() && "unexpected empty reassociation");
@@ -1323,7 +1324,7 @@ void generateCollapsedIndexingRegion(Location loc, Block *block,
   }
 
   for (auto indexOp : indexOps) {
-    auto dim = indexOp.dim();
+    auto dim = indexOp.getDim();
     rewriter.replaceOp(indexOp, indexReplacementVals[dim]);
   }
 }
@@ -1368,7 +1369,7 @@ static FailureOr<SmallVector<Value>> collapseGenericOpIterationDims(
 
   // Get the iterator types for the operand.
   SmallVector<StringRef> iteratorTypes = getCollapsedOpIteratorTypes(
-      genericOp.iterator_types().getValue(), collapsingInfo);
+      genericOp.getIteratorTypes().getValue(), collapsingInfo);
 
   // Get the indexing maps.
   auto indexingMaps = llvm::to_vector(
@@ -1577,7 +1578,7 @@ class FoldScalarOrSplatConstant : public OpRewritePattern<GenericOp> {
           /*inputs=*/fusedOperands,
           /*outputs=*/outputOperands,
           rewriter.getAffineMapArrayAttr(fusedIndexMaps),
-          genericOp.iterator_types(),
+          genericOp.getIteratorTypes(),
           /*doc=*/nullptr,
           /*library_call=*/nullptr);
 
@@ -1664,7 +1665,7 @@ struct FoldFillWithGenericOp : public OpRewritePattern<GenericOp> {
     if (!genericOp.hasTensorSemantics())
       return failure();
     bool fillFound = false;
-    Block &payload = genericOp.region().front();
+    Block &payload = genericOp.getRegion().front();
     for (OpOperand *opOperand : genericOp.getInputOperands()) {
       if (!genericOp.payloadUsesValueFromOperand(opOperand))
         continue;

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
index 8a63661734442..9c5b3fab22405 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
@@ -58,8 +58,8 @@ FailureOr<GenericOp> mlir::linalg::generalizeNamedOp(RewriterBase &rewriter,
   GenericOp genericOp =
       rewriter.create<GenericOp>(linalgOp.getLoc(), types, inputOperands,
                                  outputOperands, indexingMaps, iterators);
-  rewriter.inlineRegionBefore(linalgOp->getRegion(0), genericOp.region(),
-                              genericOp.region().begin());
+  rewriter.inlineRegionBefore(linalgOp->getRegion(0), genericOp.getRegion(),
+                              genericOp.getRegion().begin());
   rewriter.replaceOp(linalgOp, genericOp->getResults());
   return genericOp;
 }

diff --git a/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp b/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
index 0f926f5a05281..978220efbf208 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
@@ -24,7 +24,7 @@ struct InitTensorLoweringPattern : public OpRewritePattern<InitTensorOp> {
   LogicalResult matchAndRewrite(InitTensorOp op,
                                 PatternRewriter &rewriter) const override {
     rewriter.replaceOpWithNewOp<bufferization::AllocTensorOp>(op, op.getType(),
-                                                              op.sizes());
+                                                              op.getSizes());
     return success();
   }
 };

diff --git a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
index d57ecdfadc6b7..b2ad1b24c6b6e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
@@ -56,10 +56,10 @@ struct InlineScalarOperands : public OpRewritePattern<GenericOp> {
     auto newOp = rewriter.create<GenericOp>(
         loc, genericOp->getResultTypes(), newOperands, outputOperands,
         newIndexingMaps,
-        llvm::to_vector<4>(
-            genericOp.iterator_types().template getAsValueRange<StringAttr>()));
-    rewriter.cloneRegionBefore(genericOp.region(), newOp.region(),
-                               newOp.region().begin());
+        llvm::to_vector<4>(genericOp.getIteratorTypes()
+                               .template getAsValueRange<StringAttr>()));
+    rewriter.cloneRegionBefore(genericOp.getRegion(), newOp.getRegion(),
+                               newOp.getRegion().begin());
 
     Block *body = newOp.getBody();
     PatternRewriter::InsertionGuard guard(rewriter);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
index 7582a3c1ce988..5065bc5b4d6f2 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
@@ -77,7 +77,7 @@ mlir::linalg::interchangeGenericOp(RewriterBase &rewriter, GenericOp genericOp,
                      rewriter.getAffineMapArrayAttr(newIndexingMaps));
 
   // 3. Compute the interchanged iterator types.
-  ArrayRef<Attribute> itTypes = genericOp.iterator_types().getValue();
+  ArrayRef<Attribute> itTypes = genericOp.getIteratorTypes().getValue();
   SmallVector<Attribute> itTypesVector;
   llvm::append_range(itTypesVector, itTypes);
   SmallVector<int64_t> permutation(interchangeVector.begin(),
@@ -99,7 +99,7 @@ mlir::linalg::interchangeGenericOp(RewriterBase &rewriter, GenericOp genericOp,
                         return rewriter.create<IndexOp>(indexOp->getLoc(), dim);
                       });
       rewriter.replaceOpWithNewOp<AffineApplyOp>(
-          indexOp, permutationMap.getSubMap(indexOp.dim()), allIndices);
+          indexOp, permutationMap.getSubMap(indexOp.getDim()), allIndices);
     }
   }
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 51f1c89e702ec..3656bc2a1a067 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -189,7 +189,7 @@ static void replaceIndexOpsByInductionVariables(LinalgOp linalgOp,
     LoopLikeOpInterface loopOp = loopOps.back();
     for (IndexOp indexOp :
          llvm::make_early_inc_range(loopOp.getLoopBody().getOps<IndexOp>()))
-      rewriter.replaceOp(indexOp, allIvs[indexOp.dim()]);
+      rewriter.replaceOp(indexOp, allIvs[indexOp.getDim()]);
   }
 }
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp b/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
index b331b6657fcea..1d987ee62c9c9 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
@@ -104,8 +104,8 @@ struct SimplifyDepthwiseConvOp
     Value kernel = op.getInputOperand(1)->get();
     Value init = op.getOutputOperand(0)->get();
 
-    auto stride = op.strides();
-    auto dilation = op.dilations();
+    auto stride = op.getStrides();
+    auto dilation = op.getDilations();
 
     return matchAndReplaceDepthwiseConv(operation, input, kernel, nullptr,
                                         nullptr, init, stride, dilation,
@@ -126,8 +126,8 @@ struct SimplifyDepthwiseConvQOp
     Value kZp = op.getInputOperand(3)->get();
     Value init = op.getOutputOperand(0)->get();
 
-    auto stride = op.strides();
-    auto dilation = op.dilations();
+    auto stride = op.getStrides();
+    auto dilation = op.getDilations();
 
     return matchAndReplaceDepthwiseConv(operation, input, kernel, iZp, kZp,
                                         init, stride, dilation, rewriter);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
index 6eb263a0bf156..0ca26ba0140af 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
@@ -201,8 +201,8 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReduction(
   GenericOp genericOp = b.create<GenericOp>(
       loc, TypeRange({initOrAllocTensor.getType()}), newInputs,
       ValueRange({identityTensor}), newMaps, newIteratorTypes);
-  b.inlineRegionBefore(op->getRegion(0), genericOp.region(),
-                       genericOp.region().begin());
+  b.inlineRegionBefore(op->getRegion(0), genericOp.getRegion(),
+                       genericOp.getRegion().begin());
 
   // Then create a new reduction that only reduces the newly added dimension
   // from the previous op.
@@ -391,10 +391,10 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   GenericOp genericOp =
       b.create<GenericOp>(loc, ValueRange(newOutputs).getTypes(), newInputs,
                           newOutputs, newMaps, iteratorTypes);
-  b.inlineRegionBefore(op->getRegion(0), genericOp.region(),
-                       genericOp.region().begin());
-  genericOp.region().front().insertArgument(reductionDimPos,
-                                            b.getIntegerType(1), loc);
+  b.inlineRegionBefore(op->getRegion(0), genericOp.getRegion(),
+                       genericOp.getRegion().begin());
+  genericOp.getRegion().front().insertArgument(reductionDimPos,
+                                               b.getIntegerType(1), loc);
 
   // Step 5. Create new reduction ops that only reduce the newly added
   // dimensions from the previous op.

diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index 58323db48bedd..67d5e9997bff3 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -50,7 +50,7 @@ static LogicalResult inlinePayload(OpBuilder &b, LinalgOp linalgOp,
   map.map(body->getArguments(), argValues);
   for (auto &op : body->without_terminator()) {
     if (auto indexOp = dyn_cast<IndexOp>(&op)) {
-      map.map(indexOp.getResult(), ivs[indexOp.dim()]);
+      map.map(indexOp.getResult(), ivs[indexOp.getDim()]);
       continue;
     }
     b.clone(op, map);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 600300855be5b..2247fc5d97d0a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -860,9 +860,9 @@ DownscaleSizeOneWindowed2DConvolution::returningMatchAndRewrite(
   if (convOp.hasBufferSemantics())
     return failure(); // To be implemented.
 
-  Value input = convOp.inputs().front();
-  Value kernel = convOp.inputs().back();
-  Value output = convOp.outputs().front();
+  Value input = convOp.getInputs().front();
+  Value kernel = convOp.getInputs().back();
+  Value output = convOp.getOutputs().front();
 
   auto inputType = input.getType().dyn_cast<RankedTensorType>();
   auto kernelType = kernel.getType().dyn_cast<RankedTensorType>();
@@ -901,11 +901,12 @@ DownscaleSizeOneWindowed2DConvolution::returningMatchAndRewrite(
 
   // Rank-reduce strides and dilations too.
   // TODO: dropDim 1-liner helper.
-  auto strides = llvm::to_vector<4>(convOp.strides().getValues<int64_t>());
+  auto strides = llvm::to_vector<4>(convOp.getStrides().getValues<int64_t>());
   strides.erase(strides.begin() + (removeH ? 0 : 1));
   auto stridesAttr = rewriter.getI64VectorAttr(strides);
 
-  auto dilations = llvm::to_vector<4>(convOp.dilations().getValues<int64_t>());
+  auto dilations =
+      llvm::to_vector<4>(convOp.getDilations().getValues<int64_t>());
   dilations.erase(dilations.begin() + (removeH ? 0 : 1));
   auto dilationsAttr = rewriter.getI64VectorAttr(dilations);
 
@@ -930,9 +931,9 @@ DownscaleDepthwiseConv2DNhwcHwcOp::returningMatchAndRewrite(
   if (convOp.hasBufferSemantics())
     return failure(); // To be implemented.
 
-  Value input = convOp.inputs().front();
-  Value kernel = convOp.inputs().back();
-  Value output = convOp.outputs().front();
+  Value input = convOp.getInputs().front();
+  Value kernel = convOp.getInputs().back();
+  Value output = convOp.getOutputs().front();
 
   auto inputType = input.getType().dyn_cast<RankedTensorType>();
   auto kernelType = kernel.getType().dyn_cast<RankedTensorType>();
@@ -971,11 +972,12 @@ DownscaleDepthwiseConv2DNhwcHwcOp::returningMatchAndRewrite(
 
   // Rank-reduce strides and dilations too.
   // TODO: dropDim 1-liner helper.
-  auto strides = llvm::to_vector<4>(convOp.strides().getValues<int64_t>());
+  auto strides = llvm::to_vector<4>(convOp.getStrides().getValues<int64_t>());
   strides.erase(strides.begin() + (removeH ? 0 : 1));
   auto stridesAttr = rewriter.getI64VectorAttr(strides);
 
-  auto dilations = llvm::to_vector<4>(convOp.dilations().getValues<int64_t>());
+  auto dilations =
+      llvm::to_vector<4>(convOp.getDilations().getValues<int64_t>());
   dilations.erase(dilations.begin() + (removeH ? 0 : 1));
   auto dilationsAttr = rewriter.getI64VectorAttr(dilations);
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 99883e4e19644..6975937e622c5 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -252,7 +252,7 @@ vectorizeLinalgYield(OpBuilder &b, Operation *op,
   auto yieldOp = dyn_cast<linalg::YieldOp>(op);
   if (!yieldOp)
     return VectorizationResult{VectorizationStatus::Failure, nullptr};
-  for (const auto &outputs : llvm::enumerate(yieldOp.values())) {
+  for (const auto &outputs : llvm::enumerate(yieldOp.getValues())) {
     // TODO: Scan for an opportunity for reuse.
     // TODO: use a map.
     Value vectorValue = bvm.lookup(outputs.value());
@@ -278,23 +278,23 @@ static VectorizationResult vectorizeLinalgIndex(OpBuilder &b, Operation *op,
   auto targetShape = linalgOp.computeStaticLoopSizes();
   // Compute a one-dimensional index vector for the index op dimension.
   SmallVector<int64_t> constantSeq =
-      llvm::to_vector<16>(llvm::seq<int64_t>(0, targetShape[indexOp.dim()]));
+      llvm::to_vector<16>(llvm::seq<int64_t>(0, targetShape[indexOp.getDim()]));
   auto constantOp =
       b.create<arith::ConstantOp>(loc, b.getIndexVectorAttr(constantSeq));
   // Return the one-dimensional index vector if it lives in the trailing
   // dimension of the iteration space since the vectorization algorithm in this
   // case can handle the broadcast.
-  if (indexOp.dim() == targetShape.size() - 1)
+  if (indexOp.getDim() == targetShape.size() - 1)
     return VectorizationResult{VectorizationStatus::NewOp, constantOp};
   // Otherwise permute the targetShape to move the index dimension last,
   // broadcast the one-dimensional index vector to the permuted shape, and
   // finally transpose the broadcasted index vector to undo the permutation.
-  std::swap(targetShape[indexOp.dim()], targetShape.back());
+  std::swap(targetShape[indexOp.getDim()], targetShape.back());
   auto broadCastOp = b.create<vector::BroadcastOp>(
       loc, VectorType::get(targetShape, b.getIndexType()), constantOp);
   SmallVector<int64_t> transposition =
       llvm::to_vector<16>(llvm::seq<int64_t>(0, linalgOp.getNumLoops()));
-  std::swap(transposition.back(), transposition[indexOp.dim()]);
+  std::swap(transposition.back(), transposition[indexOp.getDim()]);
   auto transposeOp =
       b.create<vector::TransposeOp>(loc, broadCastOp, transposition);
   return VectorizationResult{VectorizationStatus::NewOp, transposeOp};

diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index 4f14164bf26ca..5eac5ea4ab4d5 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -103,7 +103,7 @@ static bool isTiled(AffineMap map, ArrayRef<OpFoldResult> tileSizes) {
 
 Optional<RegionMatcher::BinaryOpKind>
 RegionMatcher::matchAsScalarBinaryOp(GenericOp op) {
-  auto &region = op.region();
+  auto &region = op.getRegion();
   if (!llvm::hasSingleElement(region))
     return llvm::None;
 
@@ -1062,7 +1062,7 @@ void offsetIndices(RewriterBase &b, LinalgOp linalgOp,
     return;
 
   for (IndexOp indexOp : linalgOp.getBlock()->getOps<IndexOp>()) {
-    if (indexOp.dim() >= offsets.size() || !offsets[indexOp.dim()])
+    if (indexOp.getDim() >= offsets.size() || !offsets[indexOp.getDim()])
       continue;
     OpBuilder::InsertionGuard guard(b);
     b.setInsertionPointAfter(indexOp);
@@ -1070,7 +1070,7 @@ void offsetIndices(RewriterBase &b, LinalgOp linalgOp,
     bindDims(b.getContext(), index, offset);
     OpFoldResult applied = makeComposedFoldedAffineApply(
         b, indexOp.getLoc(), index + offset,
-        {getAsOpFoldResult(indexOp.getResult()), offsets[indexOp.dim()]});
+        {getAsOpFoldResult(indexOp.getResult()), offsets[indexOp.getDim()]});
     Value materialized = materializeOpFoldResult(b, indexOp.getLoc(), applied);
     b.replaceOpWithIf(indexOp, materialized, [&](OpOperand &use) {
       return use.getOwner() != materialized.getDefiningOp();

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index b4cd6c36ced7a..f78b6f0ae8e62 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -58,7 +58,7 @@ static bool isAlloc(OpOperand *op, bool isZero) {
 
 // Helper to detect sampling operation.
 static bool isSampling(GenericOp op) {
-  auto yieldOp = cast<linalg::YieldOp>(op.region().front().getTerminator());
+  auto yieldOp = cast<linalg::YieldOp>(op.getRegion().front().getTerminator());
   if (auto *def = yieldOp.getOperand(0).getDefiningOp()) {
     if (isa<arith::MulFOp>(def) || isa<arith::MulIOp>(def)) {
       // Both scalar input arguments used exactly once.
@@ -85,7 +85,7 @@ static bool isMulChain(Value val, Value x) {
 
 // Helper to detect x = x + <multiplications>.
 static bool isSumOfMul(GenericOp op) {
-  auto yieldOp = cast<linalg::YieldOp>(op.region().front().getTerminator());
+  auto yieldOp = cast<linalg::YieldOp>(op.getRegion().front().getTerminator());
   if (auto *def = yieldOp.getOperand(0).getDefiningOp()) {
     if (isa<arith::AddFOp>(def) || isa<arith::AddIOp>(def)) {
       Value x = op.getBlock()->getArguments().back();
@@ -98,7 +98,7 @@ static bool isSumOfMul(GenericOp op) {
 
 // Helper to detect direct yield of a zero value.
 static bool isZeroYield(GenericOp op) {
-  auto yieldOp = cast<linalg::YieldOp>(op.region().front().getTerminator());
+  auto yieldOp = cast<linalg::YieldOp>(op.getRegion().front().getTerminator());
   if (auto arg = yieldOp.getOperand(0).dyn_cast<BlockArgument>()) {
     if (arg.getOwner()->getParentOp() == op) {
       OpOperand *t = op.getInputAndOutputOperands()[arg.getArgNumber()];
@@ -201,11 +201,11 @@ struct FuseSparseMultiplyOverAdd : public OpRewritePattern<GenericOp> {
         loc, op.getResult(0).getType(), inputOps, outputOps,
         rewriter.getAffineMapArrayAttr(fusedIndexMaps), prod.iterator_types(),
         /*doc=*/nullptr, /*library_call=*/nullptr);
-    Block &prodBlock = prod.region().front();
-    Block &consBlock = op.region().front();
+    Block &prodBlock = prod.getRegion().front();
+    Block &consBlock = op.getRegion().front();
     BlockAndValueMapping mapper;
     Block *fusedBlock = new Block();
-    fusedOp.region().push_back(fusedBlock);
+    fusedOp.getRegion().push_back(fusedBlock);
     unsigned num = prodBlock.getNumArguments();
     for (unsigned i = 0; i < num - 1; i++)
       addArg(mapper, fusedBlock, prodBlock.getArgument(i));

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index f392e1859a447..aab003b57efff 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -932,7 +932,7 @@ static Value relinkBranch(CodeGen &codegen, RewriterBase &rewriter,
                           Block *block, Value e, unsigned ldx) {
   if (Operation *def = e.getDefiningOp()) {
     if (auto indexOp = dyn_cast<linalg::IndexOp>(def))
-      return genIndexValue(codegen, rewriter, indexOp.dim(), ldx);
+      return genIndexValue(codegen, rewriter, indexOp.getDim(), ldx);
     if (def->getBlock() == block) {
       for (unsigned i = 0, n = def->getNumOperands(); i < n; i++)
         def->setOperand(

diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
index 6ef79f868f8af..8625708793d15 100644
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -796,7 +796,7 @@ unsigned Merger::buildLattices(unsigned e, unsigned i) {
 
 Optional<unsigned> Merger::buildTensorExpFromLinalg(linalg::GenericOp op) {
   // Build the linalg semantics backward from yield.
-  Operation *yield = op.region().front().getTerminator();
+  Operation *yield = op.getRegion().front().getTerminator();
   assert(isa<linalg::YieldOp>(yield));
   return buildTensorExp(op, yield->getOperand(0));
 }
@@ -880,12 +880,12 @@ Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
   }
   // Something defined outside is invariant.
   Operation *def = v.getDefiningOp();
-  if (def->getBlock() != &op.region().front())
+  if (def->getBlock() != &op.getRegion().front())
     return addExp(kInvariant, v);
   // Construct index operations.
   if (def->getNumOperands() == 0) {
     if (auto indexOp = dyn_cast<linalg::IndexOp>(def))
-      return addExp(kIndex, indexOp.dim());
+      return addExp(kIndex, indexOp.getDim());
   }
   // Construct unary operations if subexpression can be built.
   if (def->getNumOperands() == 1) {

