[Mlir-commits] [mlir] 6d5fc1e - [mlir] Don't use Optional::getValue (NFC)

Kazu Hirata llvmlistbot at llvm.org
Mon Jun 20 23:20:31 PDT 2022


Author: Kazu Hirata
Date: 2022-06-20T23:20:25-07:00
New Revision: 6d5fc1e3d5ac4d241421d5e3a086ff97fed69b5e

URL: https://github.com/llvm/llvm-project/commit/6d5fc1e3d5ac4d241421d5e3a086ff97fed69b5e
DIFF: https://github.com/llvm/llvm-project/commit/6d5fc1e3d5ac4d241421d5e3a086ff97fed69b5e.diff

LOG: [mlir] Don't use Optional::getValue (NFC)
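
Every hunk in this commit applies the same mechanical rewrite: a call to
llvm::Optional<T>::getValue() is replaced with the equivalent dereference,
*opt for the contained value or opt->member for member access. A rough
illustration of the pattern, using a hypothetical helper rather than code
taken from the diff below:

  #include "llvm/ADT/Optional.h"
  #include "llvm/ADT/StringRef.h"

  // Illustrative only: true if the optional holds a non-empty name.
  static bool hasNonEmptyName(llvm::Optional<llvm::StringRef> name) {
    if (!name)                        // the boolean emptiness check is unchanged
      return false;
    // Before this commit: !name.getValue().empty()
    // After this commit:  !name->empty(), or equivalently !(*name).empty()
    return !name->empty();
  }

Both spellings return the same contained value, so this is a pure respelling,
which is why the commit is tagged NFC (no functional change).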

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h
    mlir/include/mlir/IR/DialectImplementation.h
    mlir/lib/Analysis/Presburger/IntegerRelation.cpp
    mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
    mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
    mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
    mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
    mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
    mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
    mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
    mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
    mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
    mlir/lib/Dialect/Affine/Analysis/Utils.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
    mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
    mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
    mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp
    mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp
    mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
    mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
    mlir/lib/Dialect/EmitC/IR/EmitC.cpp
    mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
    mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
    mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
    mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
    mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
    mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
    mlir/lib/Dialect/Linalg/Utils/Utils.cpp
    mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
    mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
    mlir/lib/Dialect/SCF/IR/SCF.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
    mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
    mlir/lib/Dialect/Tensor/Transforms/SplitPadding.cpp
    mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
    mlir/lib/Dialect/Vector/IR/VectorOps.cpp
    mlir/lib/ExecutionEngine/ExecutionEngine.cpp
    mlir/lib/ExecutionEngine/JitRunner.cpp
    mlir/lib/IR/AffineExpr.cpp
    mlir/lib/Parser/AttributeParser.cpp
    mlir/lib/Parser/Parser.cpp
    mlir/lib/Parser/TypeParser.cpp
    mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
    mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
    mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
    mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
    mlir/lib/Transforms/Utils/DialectConversion.cpp
    mlir/test/lib/Dialect/Test/TestTypes.h
    mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
    mlir/tools/mlir-tblgen/OpFormatGen.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
index c793b51024907..d5c253734cee0 100644
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
@@ -875,7 +875,7 @@ def MemRef_GlobalOp : MemRef_Op<"global", [Symbol]> {
   let extraClassDeclaration = [{
      bool isExternal() { return !initial_value(); }
      bool isUninitialized() {
-       return !isExternal() && initial_value().getValue().isa<UnitAttr>();
+       return !isExternal() && initial_value()->isa<UnitAttr>();
      }
      /// Returns the constant initial value if the memref.global is a constant,
      /// or null otherwise.

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h b/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h
index 36d66f123ea5d..9c307bc5cdcf4 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h
@@ -32,7 +32,7 @@ parseEnumKeywordAttr(EnumClass &value, ParserType &parser,
   if (parser.parseKeyword(&keyword))
     return failure();
   if (Optional<EnumClass> attr = spirv::symbolizeEnum<EnumClass>(keyword)) {
-    value = attr.getValue();
+    value = *attr;
     return success();
   }
   return parser.emitError(loc, "invalid ")

diff --git a/mlir/include/mlir/IR/DialectImplementation.h b/mlir/include/mlir/IR/DialectImplementation.h
index e9ec9d2cc2cb1..02d14e6989b20 100644
--- a/mlir/include/mlir/IR/DialectImplementation.h
+++ b/mlir/include/mlir/IR/DialectImplementation.h
@@ -118,7 +118,7 @@ struct FieldParser<
       auto element = FieldParser<ElementT>::parse(parser);
       if (failed(element))
         return failure();
-      elements.push_back(element.getValue());
+      elements.push_back(*element);
       return success();
     };
     if (parser.parseCommaSeparatedList(elementParser))

diff --git a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
index fc2fbd5b634d9..25d89f93d93d1 100644
--- a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
+++ b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
@@ -771,7 +771,7 @@ Optional<SmallVector<int64_t, 8>> IntegerRelation::findIntegerSample() const {
   SmallVector<int64_t, 8> coneSample(llvm::map_range(shrunkenConeSample, ceil));
 
   // 6) Return transform * concat(boundedSample, coneSample).
-  SmallVector<int64_t, 8> &sample = boundedSample.getValue();
+  SmallVector<int64_t, 8> &sample = *boundedSample;
   sample.append(coneSample.begin(), coneSample.end());
   return transform.postMultiplyWithColumn(sample);
 }

diff --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
index f94e973acf210..1e0d1adefbc34 100644
--- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
+++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
@@ -224,7 +224,7 @@ class AffineParallelLowering : public OpRewritePattern<AffineParallelOp> {
           arith::symbolizeAtomicRMWKind(
               static_cast<uint64_t>(reduction.cast<IntegerAttr>().getInt()));
       assert(reductionOp && "Reduction operation cannot be of None Type");
-      arith::AtomicRMWKind reductionOpValue = reductionOp.getValue();
+      arith::AtomicRMWKind reductionOpValue = *reductionOp;
       identityVals.push_back(
           arith::getIdentityValue(reductionOpValue, resultType, rewriter, loc));
     }
@@ -244,7 +244,7 @@ class AffineParallelLowering : public OpRewritePattern<AffineParallelOp> {
           arith::symbolizeAtomicRMWKind(
               reductions[i].cast<IntegerAttr>().getInt());
       assert(reductionOp && "Reduction Operation cannot be of None Type");
-      arith::AtomicRMWKind reductionOpValue = reductionOp.getValue();
+      arith::AtomicRMWKind reductionOpValue = *reductionOp;
       rewriter.setInsertionPoint(&parOp.getBody()->back());
       auto reduceOp = rewriter.create<scf::ReduceOp>(
           loc, affineParOpTerminator->getOperand(i));

diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
index dacee9972e3d0..b9d4b3fd78cb6 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
@@ -314,7 +314,7 @@ LogicalResult GPUModuleConversion::matchAndRewrite(
   // Add a keyword to the module name to avoid symbolic conflict.
   std::string spvModuleName = (kSPIRVModule + moduleOp.getName()).str();
   auto spvModule = rewriter.create<spirv::ModuleOp>(
-      moduleOp.getLoc(), addressingModel, memoryModel.getValue(), llvm::None,
+      moduleOp.getLoc(), addressingModel, *memoryModel, llvm::None,
       StringRef(spvModuleName));
 
   // Move the region from the module op into the SPIR-V module.

diff --git a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
index e7e64aece05d2..9428de0535344 100644
--- a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
+++ b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
@@ -103,8 +103,7 @@ class VulkanLaunchFuncToVulkanCallsPass
 
   /// Checks whether the given LLVM::CallOp is a vulkan launch call op.
   bool isVulkanLaunchCallOp(LLVM::CallOp callOp) {
-    return (callOp.getCallee() &&
-            callOp.getCallee().getValue() == kVulkanLaunch &&
+    return (callOp.getCallee() && *callOp.getCallee() == kVulkanLaunch &&
             callOp.getNumOperands() >= kVulkanLaunchNumConfigOperands);
   }
 
@@ -112,7 +111,7 @@ class VulkanLaunchFuncToVulkanCallsPass
   /// op.
   bool isCInterfaceVulkanLaunchCallOp(LLVM::CallOp callOp) {
     return (callOp.getCallee() &&
-            callOp.getCallee().getValue() == kCInterfaceVulkanLaunch &&
+            *callOp.getCallee() == kCInterfaceVulkanLaunch &&
             callOp.getNumOperands() >= kVulkanLaunchNumConfigOperands);
   }
 

diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index 750ec9b02cdc8..98013dfa3c5a0 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -402,7 +402,7 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<memref::DimOp> {
     // Take advantage if index is constant.
     MemRefType memRefType = operandType.cast<MemRefType>();
     if (Optional<int64_t> index = getConstantDimIndex(dimOp)) {
-      int64_t i = index.getValue();
+      int64_t i = *index;
       if (memRefType.isDynamicDim(i)) {
         // extract dynamic size from the memref descriptor.
         MemRefDescriptor descriptor(adaptor.source());

diff --git a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
index 663fa8958baba..f383f9a3995eb 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
@@ -121,7 +121,7 @@ static LogicalResult getKernelGlobalVariables(
 /// Encodes the SPIR-V module's symbolic name into the name of the entry point
 /// function.
 static LogicalResult encodeKernelName(spirv::ModuleOp module) {
-  StringRef spvModuleName = module.sym_name().getValue();
+  StringRef spvModuleName = *module.sym_name();
   // We already know that the module contains exactly one entry point function
   // based on `getKernelGlobalVariables()` call. Update this function's name
   // to:

diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index 1a830b9483003..6e0f007efc4b3 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -539,8 +539,7 @@ class BranchConditionalConversionPattern
     ElementsAttr branchWeights = nullptr;
     if (auto weights = op.branch_weights()) {
       VectorType weightType = VectorType::get(2, rewriter.getI32Type());
-      branchWeights =
-          DenseElementsAttr::get(weightType, weights.getValue().getValue());
+      branchWeights = DenseElementsAttr::get(weightType, weights->getValue());
     }
 
     rewriter.replaceOpWithNewOp<LLVM::CondBrOp>(
@@ -902,7 +901,7 @@ class LoadStorePattern : public SPIRVToLLVMConversion<SPIRVOp> {
                                     /*isVolatile=*/false,
                                     /*isNonTemporal=*/false);
     }
-    auto memoryAccess = op.memory_access().getValue();
+    auto memoryAccess = *op.memory_access();
     switch (memoryAccess) {
     case spirv::MemoryAccess::Aligned:
     case spirv::MemoryAccess::None:

diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index 134f1c0d191de..133a3631b7d5a 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -1847,7 +1847,7 @@ class PadConverter : public OpRewritePattern<tosa::PadOp> {
       } else if (elementTy.isa<IntegerType>() && !padOp.quantization_info()) {
         constantAttr = rewriter.getIntegerAttr(elementTy, 0);
       } else if (elementTy.isa<IntegerType>() && padOp.quantization_info()) {
-        int64_t value = padOp.quantization_info().getValue().getInputZp();
+        int64_t value = padOp.quantization_info()->getInputZp();
         constantAttr = rewriter.getIntegerAttr(elementTy, value);
       }
       if (constantAttr)

diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
index e75de868f3733..866fea818e8f8 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -540,7 +540,7 @@ class MatMulConverter : public OpConversionPattern<tosa::MatMulOp> {
       return success();
     }
 
-    auto quantizationInfo = op.quantization_info().getValue();
+    auto quantizationInfo = *op.quantization_info();
     auto aZp = rewriter.create<arith::ConstantOp>(
         loc, rewriter.getI32IntegerAttr(quantizationInfo.getAZp()));
     auto bZp = rewriter.create<arith::ConstantOp>(
@@ -650,7 +650,7 @@ class FullyConnectedConverter
       return success();
     }
 
-    auto quantizationInfo = op.quantization_info().getValue();
+    auto quantizationInfo = *op.quantization_info();
     auto inputZp = rewriter.create<arith::ConstantOp>(
         loc, rewriter.getI32IntegerAttr(quantizationInfo.getInputZp()));
     auto outputZp = rewriter.create<arith::ConstantOp>(
@@ -890,7 +890,7 @@ class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
             // If we have quantization information we need to apply an offset
             // for the input zp value.
             if (op.quantization_info()) {
-              auto quantizationInfo = op.quantization_info().getValue();
+              auto quantizationInfo = *op.quantization_info();
               auto inputZp = rewriter.create<arith::ConstantOp>(
                   loc, b.getIntegerAttr(accETy, quantizationInfo.getInputZp()));
               Value offset =
@@ -926,7 +926,7 @@ class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
             // If we have quantization information we need to apply output
             // zeropoint.
             if (op.quantization_info()) {
-              auto quantizationInfo = op.quantization_info().getValue();
+              auto quantizationInfo = *op.quantization_info();
               auto outputZp = rewriter.create<arith::ConstantOp>(
                   loc, b.getIntegerAttr(scaled.getType(),
                                         quantizationInfo.getOutputZp()));

diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
index a212a49f2e596..9ead9e29c71da 100644
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -171,7 +171,7 @@ static Value generateInBoundsCheck(
         vector::createOrFoldDimOp(b, loc, xferOp.getSource(), *dim);
     AffineExpr d0, d1;
     bindDims(xferOp.getContext(), d0, d1);
-    Value base = xferOp.getIndices()[dim.getValue()];
+    Value base = xferOp.getIndices()[*dim];
     Value memrefIdx = makeComposedAffineApply(b, loc, d0 + d1, {base, iv});
     cond = lb.create<arith::CmpIOp>(arith::CmpIPredicate::sgt, memrefDim,
                                     memrefIdx);

diff --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
index 00331a920c5f0..6a9a03886f5d9 100644
--- a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
@@ -634,7 +634,7 @@ Optional<int64_t> MemRefRegion::getRegionSize() {
     LLVM_DEBUG(llvm::dbgs() << "Dynamic shapes not yet supported\n");
     return None;
   }
-  return getMemRefEltSizeInBytes(memRefType) * numElements.getValue();
+  return getMemRefEltSizeInBytes(memRefType) * *numElements;
 }
 
 /// Returns the size of memref data in bytes if it's statically shaped, None
@@ -964,7 +964,7 @@ mlir::computeSliceUnion(ArrayRef<Operation *> opsA, ArrayRef<Operation *> opsB,
     LLVM_DEBUG(llvm::dbgs() << "Cannot determine if the slice is valid\n");
     return SliceComputationResult::GenericFailure;
   }
-  if (!isSliceValid.getValue())
+  if (!*isSliceValid)
     return SliceComputationResult::IncorrectSliceFailure;
 
   return SliceComputationResult::Success;

diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
index 9d22053f19f94..b77ef90b471b9 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
@@ -639,7 +639,7 @@ static bool canRemoveSrcNodeAfterFusion(
       return false;
     }
 
-    if (!isMaximal.getValue()) {
+    if (!*isMaximal) {
       LLVM_DEBUG(llvm::dbgs()
                  << "Src loop can't be removed: fusion is not maximal\n");
       return false;
@@ -1247,7 +1247,7 @@ static bool isFusionProfitable(Operation *srcOpInst, Operation *srcStoreOpInst,
   }
 
   // Set dstLoopDepth based on best values from search.
-  *dstLoopDepth = bestDstLoopDepth.getValue();
+  *dstLoopDepth = *bestDstLoopDepth;
 
   LLVM_DEBUG(
       llvm::dbgs() << " LoopFusion fusion stats:"

diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
index 7e82a47d01edd..3cdae8ab02659 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
@@ -73,7 +73,7 @@ static void adjustToDivisorsOfTripCounts(ArrayRef<AffineForOp> band,
       continue;
     // Adjust the tile size to largest factor of the trip count less than
     // tSize.
-    uint64_t constTripCount = mayConst.getValue();
+    uint64_t constTripCount = *mayConst;
     if (constTripCount > 1 && tSizeAdjusted > constTripCount / 2)
       tSizeAdjusted = constTripCount / 2;
     while (constTripCount % tSizeAdjusted != 0)
@@ -129,7 +129,7 @@ void LoopTiling::getTileSizes(ArrayRef<AffineForOp> band,
 
   // Check how many times larger the cache size is when compared to footprint.
   uint64_t cacheSizeBytes = cacheSizeInKiB * 1024;
-  uint64_t excessFactor = llvm::divideCeil(fp.getValue(), cacheSizeBytes);
+  uint64_t excessFactor = llvm::divideCeil(*fp, cacheSizeBytes);
   if (excessFactor <= 1) {
     // No need of any tiling - set tile size to 1.
     std::fill(tileSizes->begin(), tileSizes->end(), 1);

diff --git a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
index 187078152eab9..7378418dc0afb 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
@@ -370,7 +370,7 @@ LogicalResult promoteSingleIterReductionLoop(AffineForOp forOp,
                                              bool siblingFusionUser) {
   // Check if the reduction loop is a single iteration loop.
   Optional<uint64_t> tripCount = getConstantTripCount(forOp);
-  if (!tripCount || tripCount.getValue() != 1)
+  if (!tripCount || *tripCount != 1)
     return failure();
   auto iterOperands = forOp.getIterOperands();
   auto *parentOp = forOp->getParentOp();
@@ -509,7 +509,7 @@ bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {
       return WalkResult::interrupt();
     }
 
-    stats->tripCountMap[childForOp] = maybeConstTripCount.getValue();
+    stats->tripCountMap[childForOp] = *maybeConstTripCount;
     return WalkResult::advance();
   });
   return !walkResult.wasInterrupted();

diff --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
index ea10dbb3f26ec..09f11ebfaeab3 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
@@ -130,7 +130,7 @@ static void replaceIterArgsAndYieldResults(AffineForOp forOp) {
 // TODO: extend this for arbitrary affine bounds.
 LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
   Optional<uint64_t> tripCount = getConstantTripCount(forOp);
-  if (!tripCount || tripCount.getValue() != 1)
+  if (!tripCount || *tripCount != 1)
     return failure();
 
   if (forOp.getLowerBoundMap().getNumResults() != 1)
@@ -250,7 +250,7 @@ LogicalResult mlir::affineForOpBodySkew(AffineForOp forOp,
     LLVM_DEBUG(forOp.emitRemark("non-constant trip count loop not handled"));
     return success();
   }
-  uint64_t tripCount = mayBeConstTripCount.getValue();
+  uint64_t tripCount = *mayBeConstTripCount;
 
   assert(isOpwiseShiftValid(forOp, shifts) &&
          "shifts will lead to an invalid transformation\n");
@@ -1437,7 +1437,7 @@ static bool checkLoopInterchangeDependences(
     for (unsigned j = 0; j < maxLoopDepth; ++j) {
       unsigned permIndex = loopPermMapInv[j];
       assert(depComps[permIndex].lb);
-      int64_t depCompLb = depComps[permIndex].lb.getValue();
+      int64_t depCompLb = *depComps[permIndex].lb;
       if (depCompLb > 0)
         break;
       if (depCompLb < 0)
@@ -2095,7 +2095,7 @@ static LogicalResult generateCopy(
     return failure();
   }
 
-  if (numElements.getValue() == 0) {
+  if (*numElements == 0) {
     LLVM_DEBUG(llvm::dbgs() << "Nothing to copy\n");
     *sizeInBytes = 0;
     return success();
@@ -2173,7 +2173,7 @@ static LogicalResult generateCopy(
     // Record it.
     fastBufferMap[memref] = fastMemRef;
     // fastMemRefType is a constant shaped memref.
-    *sizeInBytes = getMemRefSizeInBytes(fastMemRefType).getValue();
+    *sizeInBytes = *getMemRefSizeInBytes(fastMemRefType);
     LLVM_DEBUG(emitRemarkForBlock(*block)
                << "Creating fast buffer of type " << fastMemRefType
                << " and size " << llvm::divideCeil(*sizeInBytes, 1024)
@@ -2184,8 +2184,7 @@ static LogicalResult generateCopy(
     *sizeInBytes = 0;
   }
 
-  auto numElementsSSA =
-      top.create<arith::ConstantIndexOp>(loc, numElements.getValue());
+  auto numElementsSSA = top.create<arith::ConstantIndexOp>(loc, *numElements);
 
   Value dmaStride = nullptr;
   Value numEltPerDmaStride = nullptr;

diff --git a/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp
index b73c9039abcf6..bd4b9d7d4a6be 100644
--- a/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -41,7 +41,7 @@ struct ConstantOpInterface
         getGlobalFor(constantOp, options.bufferAlignment);
     if (failed(globalOp))
       return failure();
-    memref::GlobalOp globalMemref = globalOp.getValue();
+    memref::GlobalOp globalMemref = *globalOp;
     replaceOpWithNewBufferizedOp<memref::GetGlobalOp>(
         rewriter, op, globalMemref.type(), globalMemref.getName());
 

diff --git a/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp
index 18c2f2eaf5667..959d6339495e8 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp
@@ -498,7 +498,7 @@ AsyncRuntimePolicyBasedRefCountingPass::addRefCounting(Value value) {
       if (failed(refCount))
         return failure();
 
-      int cnt = refCount.getValue();
+      int cnt = *refCount;
 
       // Create `add_ref` operation before the operand owner.
       if (cnt > 0) {

diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
index 9b4831e80e3c9..3689522dd065e 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
@@ -246,8 +246,7 @@ struct CallOpInterface
           getAliasingOpOperand(op, opResult, state);
       assert(aliasingOpOperands.size() == 1 &&
              "expected exactly 1 aliasing OpOperand");
-      assert(aliasingOpOperands.front()->getOperandNumber() ==
-                 maybeEquiv.getValue() &&
+      assert(aliasingOpOperands.front()->getOperandNumber() == *maybeEquiv &&
              "inconsistent analysis state");
 #endif
       return BufferRelation::Equivalent;

diff --git a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
index aaaf390826d67..41ce156ef0939 100644
--- a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
+++ b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
@@ -745,7 +745,7 @@ static LogicalResult simplifyPassThroughSwitch(SwitchOp op,
     return failure();
 
   rewriter.replaceOpWithNewOp<SwitchOp>(op, op.getFlag(), defaultDest,
-                                        defaultOperands, caseValues.getValue(),
+                                        defaultOperands, *caseValues,
                                         newCaseDests, newCaseOperands);
   return success();
 }

diff --git a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
index a310495bffc71..be6dd41c029d0 100644
--- a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
+++ b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
@@ -85,7 +85,7 @@ LogicalResult emitc::CallOp::verify() {
     return emitOpError("callee must not be empty");
 
   if (Optional<ArrayAttr> argsAttr = args()) {
-    for (Attribute arg : argsAttr.getValue()) {
+    for (Attribute arg : *argsAttr) {
       if (arg.getType().isa<IndexType>()) {
         int64_t index = arg.cast<IntegerAttr>().getInt();
         // Args with elements of type index must be in range
@@ -101,7 +101,7 @@ LogicalResult emitc::CallOp::verify() {
   }
 
   if (Optional<ArrayAttr> templateArgsAttr = template_args()) {
-    for (Attribute tArg : templateArgsAttr.getValue()) {
+    for (Attribute tArg : *templateArgsAttr) {
       if (!tArg.isa<TypeAttr>() && !tArg.isa<IntegerAttr>() &&
           !tArg.isa<FloatAttr>() && !tArg.isa<emitc::OpaqueAttr>())
         return emitOpError("template argument has invalid type");

diff --git a/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp b/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
index 7f830b73fe44f..2b0a8ac267101 100644
--- a/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
@@ -292,7 +292,7 @@ SerializeToHsacoPass::translateToLLVMIR(llvm::LLVMContext &llvmContext) {
   }
 
   llvm::Linker linker(*ret);
-  for (std::unique_ptr<llvm::Module> &libModule : mbModules.getValue()) {
+  for (std::unique_ptr<llvm::Module> &libModule : *mbModules) {
     // This bitcode linking code is substantially similar to what is used in
     // hip-clang It imports the library functions into the module, allowing LLVM
     // optimization passes (which must run after linking) to optimize across the

diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 1ed13fe7c6c0e..63a89d5ec10e5 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -158,7 +158,7 @@ static ParseResult parseCmpOp(OpAsmParser &parser, OperationState &result) {
       return parser.emitError(predicateLoc)
              << "'" << predicateAttr.getValue()
              << "' is an incorrect value of the 'predicate' attribute";
-    predicateValue = static_cast<int64_t>(predicate.getValue());
+    predicateValue = static_cast<int64_t>(*predicate);
   } else {
     Optional<FCmpPredicate> predicate =
         symbolizeFCmpPredicate(predicateAttr.getValue());
@@ -166,7 +166,7 @@ static ParseResult parseCmpOp(OpAsmParser &parser, OperationState &result) {
       return parser.emitError(predicateLoc)
              << "'" << predicateAttr.getValue()
              << "' is an incorrect value of the 'predicate' attribute";
-    predicateValue = static_cast<int64_t>(predicate.getValue());
+    predicateValue = static_cast<int64_t>(*predicate);
   }
 
   result.attributes.set("predicate",
@@ -2425,7 +2425,7 @@ static ParseResult parseAtomicBinOp(OpAsmParser &parser, OperationState &result,
            << "' attribute";
   }
 
-  auto value = static_cast<int64_t>(kind.getValue());
+  auto value = static_cast<int64_t>(*kind);
   auto attr = parser.getBuilder().getI64IntegerAttr(value);
   result.addAttribute(attrName, attr);
 
@@ -2452,7 +2452,7 @@ static ParseResult parseAtomicOrdering(OpAsmParser &parser,
            << "' attribute";
   }
 
-  auto value = static_cast<int64_t>(kind.getValue());
+  auto value = static_cast<int64_t>(*kind);
   auto attr = parser.getBuilder().getI64IntegerAttr(value);
   result.addAttribute(attrName, attr);
 

diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index f5439cab0db2a..7d4691a96b823 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -372,7 +372,7 @@ LogicalResult MmaOp::verify() {
   if (mmaShape[0] == 16) {
     int64_t kFactor;
     Type multiplicandFragType;
-    switch (getMultiplicandAPtxType().getValue()) {
+    switch (*getMultiplicandAPtxType()) {
     case MMATypes::tf32:
       kFactor = 4;
       multiplicandFragType = i32Ty;
@@ -421,7 +421,7 @@ LogicalResult MmaOp::verify() {
 
   // In the M=8 case, there is only 1 possible case per data type.
   if (mmaShape[0] == 8) {
-    if (getMultiplicandAPtxType().getValue() == MMATypes::f16) {
+    if (*getMultiplicandAPtxType() == MMATypes::f16) {
       expectedA.emplace_back(2, f16x2Ty);
       expectedB.emplace_back(2, f16x2Ty);
       expectedResult.push_back(f16x2x4StructTy);
@@ -430,7 +430,7 @@ LogicalResult MmaOp::verify() {
       expectedC.emplace_back(8, f32Ty);
       allowedShapes.push_back({8, 8, 4});
     }
-    if (getMultiplicandAPtxType().getValue() == MMATypes::f64) {
+    if (*getMultiplicandAPtxType() == MMATypes::f64) {
       Type f64Ty = Float64Type::get(context);
       expectedA.emplace_back(1, f64Ty);
       expectedB.emplace_back(1, f64Ty);

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 9c6e1b314e79e..e22da09e32f09 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1472,7 +1472,7 @@ AffineMap mlir::linalg::extractOrIdentityMap(Optional<AffineMap> maybeMap,
                                              unsigned rank,
                                              MLIRContext *context) {
   if (maybeMap)
-    return maybeMap.getValue();
+    return *maybeMap;
   if (rank == 0)
     return AffineMap::get(context);
   return AffineMap::getMultiDimIdentityMap(rank, context);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 2d35deb679971..f8bb0e40336b1 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -841,7 +841,7 @@ class FoldWithProducerReshapeOpByExpansion
           fuseWithReshapeByExpansion(genericOp, reshapeOp, opOperand, rewriter);
       if (!replacementValues)
         return failure();
-      rewriter.replaceOp(genericOp, replacementValues.getValue());
+      rewriter.replaceOp(genericOp, *replacementValues);
       return success();
     }
     return failure();
@@ -876,7 +876,7 @@ struct FoldReshapeWithGenericOpByExpansion
         producer, reshapeOp, producer.getOutputOperand(0), rewriter);
     if (!replacementValues)
       return failure();
-    rewriter.replaceOp(reshapeOp, replacementValues.getValue());
+    rewriter.replaceOp(reshapeOp, *replacementValues);
     return success();
   }
 
@@ -1465,7 +1465,7 @@ class FoldWithProducerReshapeOpByCollapsing
             genericOp, "failed to do the fusion by collapsing transformation");
       }
 
-      rewriter.replaceOp(genericOp, replacements.getValue());
+      rewriter.replaceOp(genericOp, *replacements);
       return success();
     }
     return failure();

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 4a3a538b2bf32..2b783a85263b4 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -302,8 +302,7 @@ findFusableProducer(OpOperand &consumerOpOperand,
                    elem.getIndexingOpViewOperandNum();
                return isa<LinalgOp>(elem.getDependentOp()) &&
                       v == consumerOpOperand.get() && operandNum &&
-                      operandNum.getValue() ==
-                          consumerOpOperand.getOperandNumber();
+                      *operandNum == consumerOpOperand.getOperandNumber();
              })) {
       // Consumer consumes this view, `isStructurallyFusableProducer` also
       // checks whether it is a strict subview of the producer view.
@@ -533,7 +532,7 @@ static bool doesTransposeAccess(AffineMap map,
       lastFusableLoop = pos;
       continue;
     }
-    if (pos <= lastFusableLoop.getValue())
+    if (pos <= *lastFusableLoop)
       return true;
     lastFusableLoop = pos;
   }

diff --git a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
index ca968a45d33ff..669d59667f583 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
@@ -288,8 +288,7 @@ LogicalResult TileLoopNest::tileRootOp(
                       .setTileSizes(tileSizes)
                       .setLoopType(LinalgTilingLoopType::Loops);
   if (tileDistribution)
-    tilingOptions =
-        tilingOptions.setDistributionOptions(tileDistribution.getValue());
+    tilingOptions = tilingOptions.setDistributionOptions(*tileDistribution);
 
   // TODO: Propagate RewriterBase everywhere.
   IRRewriter rewriter(b);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index bba097b0d3ce3..a3b95b3fffefc 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -116,7 +116,7 @@ bool mlir::linalg::LinalgTransformationFilter::hasReplacementFilter(
     return false;
   auto attr = op->getAttr(LinalgTransforms::kLinalgTransformMarker)
                   .dyn_cast<StringAttr>();
-  return attr && attr == replacement.getValue();
+  return attr && attr == *replacement;
 }
 
 LinalgTilingOptions &
@@ -239,7 +239,7 @@ static FailureOr<Value> padOperandToSmallestStaticBoundingBox(
       LLVM_DEBUG(DBGS() << "No constant bounding box can be found for padding");
       return failure();
     }
-    paddedShape[shapeIdx++] = upperBound.getValue();
+    paddedShape[shapeIdx++] = *upperBound;
   }
   assert(shapeIdx == static_cast<int64_t>(shape.size()) &&
          "expect the dynamic and static ranks to match");
@@ -393,7 +393,7 @@ LogicalResult mlir::linalg::LinalgBaseTileAndFusePattern::matchAndRewrite(
     // could assert, but continue if this is not the case.
     if (!operandNumber)
       continue;
-    if (!fusionOptions.indicesToFuse.count(operandNumber.getValue()))
+    if (!fusionOptions.indicesToFuse.count(*operandNumber))
       continue;
     if (isa<LinalgOp>(dependence.getDependentOp()))
       producers.insert(dependence.getDependentOp());
@@ -554,7 +554,7 @@ mlir::linalg::LinalgPaddingPattern::returningMatchAndRewrite(
         padOp, en.value(), transposeVector, hoistedOp, transposeOps);
     if (failed(newResult))
       continue;
-    rewriter.replaceOp(padOp, newResult.getValue());
+    rewriter.replaceOp(padOp, *newResult);
 
     // Do not apply hoist padding to the newly introduced transpose operations.
     for (GenericOp transposeOp : transposeOps)
@@ -562,7 +562,7 @@ mlir::linalg::LinalgPaddingPattern::returningMatchAndRewrite(
   }
 
   // Replace the original operation to pad.
-  rewriter.replaceOp(linalgOp, newResults.getValue());
+  rewriter.replaceOp(linalgOp, *newResults);
   filter.replaceLinalgTransformationFilter(rewriter, paddedOp);
 
   return paddedOp;
@@ -969,7 +969,7 @@ LogicalResult ExtractSliceOfPadTensorSwapPattern::matchAndRewrite(
   bool zeroSliceGuard = true;
   if (controlFn) {
     if (Optional<bool> control = controlFn(sliceOp))
-      zeroSliceGuard = control.getValue();
+      zeroSliceGuard = *control;
     else
       return failure();
   }

diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index c9eb42beeb6c6..6e6868662baeb 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -257,8 +257,7 @@ void getUpperBoundForIndex(Value value, AffineMap &boundMap,
     if (!ubConst)
       return;
 
-    boundMap =
-        AffineMap::getConstantMap(ubConst.getValue(), value.getContext());
+    boundMap = AffineMap::getConstantMap(*ubConst, value.getContext());
     return;
   }
 
@@ -714,7 +713,7 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
   // Modify the lb, ub, and step based on the distribution options.
   SmallVector<DistributionMethod, 0> distributionMethod;
   if (distributionOptions) {
-    auto &options = distributionOptions.getValue();
+    auto &options = *distributionOptions;
     distributionMethod.assign(distributionOptions->distributionMethod.begin(),
                               distributionOptions->distributionMethod.end());
     SmallVector<Range, 2> parallelLoopRanges;

diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 5f26e4b59c114..ee1218a373407 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -762,7 +762,7 @@ LogicalResult DimOp::verify() {
   // Check that constant index is not knowingly out of range.
   auto type = source().getType();
   if (auto memrefType = type.dyn_cast<MemRefType>()) {
-    if (index.getValue() >= memrefType.getRank())
+    if (*index >= memrefType.getRank())
       return emitOpError("index is out of range");
   } else if (type.isa<UnrankedMemRefType>()) {
     // Assume index to be in range.
@@ -1316,7 +1316,7 @@ LogicalResult GlobalOp::verify() {
   }
 
   if (Optional<uint64_t> alignAttr = alignment()) {
-    uint64_t alignment = alignAttr.getValue();
+    uint64_t alignment = *alignAttr;
 
     if (!llvm::isPowerOf2_64(alignment))
       return emitError() << "alignment attribute value " << alignment
@@ -2452,7 +2452,7 @@ static MemRefType getCanonicalSubViewResultType(
   }
   AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap();
   if (!layoutMap.isIdentity())
-    layoutMap = getProjectedMap(layoutMap, unusedDims.getValue());
+    layoutMap = getProjectedMap(layoutMap, *unusedDims);
   return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap,
                          nonRankReducedType.getMemorySpace());
 }
@@ -2499,7 +2499,7 @@ static bool isTrivialSubViewOp(SubViewOp subViewOp) {
   ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
   for (const auto &size : llvm::enumerate(mixedSizes)) {
     Optional<int64_t> intValue = getConstantIntValue(size.value());
-    if (!intValue || intValue.getValue() != sourceShape[size.index()])
+    if (!intValue || *intValue != sourceShape[size.index()])
       return false;
   }
   // All conditions met. The `SubViewOp` is foldable as a no-op.

diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index 24c9a4c54e06e..b480016e7c7fb 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -791,8 +791,7 @@ LogicalResult OrderedOp::verify() {
                          << "nested inside a worksharing-loop with ordered "
                          << "clause with parameter present";
 
-  if (container.ordered_valAttr().getInt() !=
-      (int64_t)num_loops_val().getValue())
+  if (container.ordered_valAttr().getInt() != (int64_t)*num_loops_val())
     return emitOpError() << "number of variables in depend clause does not "
                          << "match number of iteration variables in the "
                          << "doacross loop";

diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index 40275eaafc2ec..012499f7dad38 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -497,7 +497,7 @@ void ForOp::getSuccessorRegions(Optional<unsigned> index,
   }
 
   // Otherwise, the loop may branch back to itself or the parent operation.
-  assert(index.getValue() == 0 && "expected loop region");
+  assert(*index == 0 && "expected loop region");
   regions.push_back(RegionSuccessor(&getLoopBody(), getRegionIterArgs()));
   regions.push_back(RegionSuccessor(getResults()));
 }

diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
index 98cc0dcaee7bf..ee717d2fe8595 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
@@ -268,7 +268,7 @@ static LogicalResult parseOptionalArrayStride(const SPIRVDialect &dialect,
   if (!optStride)
     return failure();
 
-  if (!(stride = optStride.getValue())) {
+  if (!(stride = *optStride)) {
     parser.emitError(strideLoc, "ArrayStride must be greater than zero");
     return failure();
   }
@@ -507,7 +507,7 @@ template <typename ParseType> struct ParseCommaSeparatedList<ParseType> {
   Optional<std::tuple<ParseType>> operator()(SPIRVDialect const &dialect,
                                              DialectAsmParser &parser) const {
     if (auto value = parseAndVerify<ParseType>(dialect, parser))
-      return std::tuple<ParseType>(value.getValue());
+      return std::tuple<ParseType>(*value);
     return llvm::None;
   }
 };
@@ -542,7 +542,7 @@ static Type parseImageType(SPIRVDialect const &dialect,
 
   if (parser.parseGreater())
     return Type();
-  return ImageType::get(value.getValue());
+  return ImageType::get(*value);
 }
 
 // sampledImage-type :: = `!spv.sampledImage<` image-type `>`

diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
index f52cd9f6a4916..4e7338fa1133c 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -188,7 +188,7 @@ parseEnumStrAttr(EnumClass &value, OpAsmParser &parser,
     return parser.emitError(loc, "invalid ")
            << attrName << " attribute specification: " << attrVal;
   }
-  value = attrOptional.getValue();
+  value = *attrOptional;
   return success();
 }
 
@@ -2497,7 +2497,7 @@ void spirv::GlobalVariableOp::print(OpAsmPrinter &printer) {
   // Print optional initializer
   if (auto initializer = this->initializer()) {
     printer << " " << kInitializerAttrName << '(';
-    printer.printSymbolName(initializer.getValue());
+    printer.printSymbolName(*initializer);
     printer << ')';
     elidedAttrs.push_back(kInitializerAttrName);
   }

diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
index bad8922a6fb76..9a94f195805d1 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -207,7 +207,7 @@ getTypeNumBytes(const SPIRVTypeConverter::Options &options, Type type) {
     auto elementSize = getTypeNumBytes(options, vecType.getElementType());
     if (!elementSize)
       return llvm::None;
-    return vecType.getNumElements() * elementSize.getValue();
+    return vecType.getNumElements() * *elementSize;
   }
 
   if (auto memRefType = type.dyn_cast<MemRefType>()) {
@@ -239,7 +239,7 @@ getTypeNumBytes(const SPIRVTypeConverter::Options &options, Type type) {
     for (const auto &shape : enumerate(dims))
       memrefSize = std::max(memrefSize, shape.value() * strides[shape.index()]);
 
-    return (offset + memrefSize) * elementSize.getValue();
+    return (offset + memrefSize) * *elementSize;
   }
 
   if (auto tensorType = type.dyn_cast<TensorType>()) {
@@ -250,7 +250,7 @@ getTypeNumBytes(const SPIRVTypeConverter::Options &options, Type type) {
     if (!elementSize)
       return llvm::None;
 
-    int64_t size = elementSize.getValue();
+    int64_t size = *elementSize;
     for (auto shape : tensorType.getShape())
       size *= shape;
 
@@ -604,7 +604,7 @@ static spirv::GlobalVariableOp getBuiltinVariable(Block &body,
             spirv::SPIRVDialect::getAttributeName(
                 spirv::Decoration::BuiltIn))) {
       auto varBuiltIn = spirv::symbolizeBuiltIn(builtinAttr.getValue());
-      if (varBuiltIn && varBuiltIn.getValue() == builtin) {
+      if (varBuiltIn && *varBuiltIn == builtin) {
         return varOp;
       }
     }

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index e147ecd2b1693..dae378490d2f7 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -415,7 +415,7 @@ class SparseTensorToDimSizeConverter
       return failure();
     // Generate the call.
     Value src = adaptor.getOperands()[0];
-    int64_t idx = index.getValue();
+    int64_t idx = *index;
     rewriter.replaceOp(op, genDimSizeCall(rewriter, op, enc, src, idx));
     return success();
   }

diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 19283f1904c8f..6352db7e31bec 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -309,7 +309,7 @@ LogicalResult DimOp::verify() {
   // Check that constant index is not knowingly out of range.
   auto type = source().getType();
   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
-    if (index.getValue() >= tensorType.getRank())
+    if (*index >= tensorType.getRank())
       return emitOpError("index is out of range");
   } else if (type.isa<UnrankedTensorType>()) {
     // Assume index to be in range.
@@ -1138,7 +1138,7 @@ llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
     // If the size is not 1, or if the current matched dimension of the result
     // is the same static shape as the size value (which is 1), then the
     // dimension is preserved.
-    if (!sizeVal || sizeVal.getValue() != 1 ||
+    if (!sizeVal || *sizeVal != 1 ||
         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
       shapePos++;
       continue;

diff --git a/mlir/lib/Dialect/Tensor/Transforms/SplitPadding.cpp b/mlir/lib/Dialect/Tensor/Transforms/SplitPadding.cpp
index fbf3ca1befba4..79c92466695fd 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/SplitPadding.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/SplitPadding.cpp
@@ -27,7 +27,7 @@ using namespace mlir;
 /// Returns true if the the given `attrOrValue` is a constant zero.
 static bool isZero(OpFoldResult attrOrValue) {
   if (Optional<int64_t> val = getConstantIntValue(attrOrValue))
-    return val.getValue() == 0;
+    return *val == 0;
   return false;
 }
 

diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 562f5dbfd5179..89fc4e318334f 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -347,7 +347,7 @@ struct MaterializePadValue : public OpRewritePattern<tosa::PadOp> {
     } else if (elementTy.isa<IntegerType>() && !op.quantization_info()) {
       constantAttr = rewriter.getIntegerAttr(elementTy, 0);
     } else if (elementTy.isa<IntegerType>() && op.quantization_info()) {
-      auto value = op.quantization_info().getValue().getInputZp();
+      auto value = op.quantization_info()->getInputZp();
       constantAttr = rewriter.getIntegerAttr(elementTy, value);
     }
 

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
index ef94e55c855d3..43611003b91c7 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
@@ -87,7 +87,7 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
           rewriter
               .create<tosa::FullyConnectedOp>(
                   op.getLoc(), fullyConnectedShapeType, reshapedInput,
-                  reshapedWeight, op.bias(), op.quantization_info().getValue())
+                  reshapedWeight, op.bias(), *op.quantization_info())
               .getResult();
     } else {
       fullyConnectedValue = rewriter

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
index d4845dd430cd8..120e5024837d4 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -134,8 +134,7 @@ class TransposeConvDilatedConverter
       conv2d = rewriter.create<tosa::Conv2DOp>(
           loc, resultTy, input, reverse2, bias,
           rewriter.getI64ArrayAttr(convPad), rewriter.getI64ArrayAttr(stride),
-          rewriter.getI64ArrayAttr(dilation),
-          op.quantization_info().getValue());
+          rewriter.getI64ArrayAttr(dilation), *op.quantization_info());
     } else {
       conv2d = rewriter.create<tosa::Conv2DOp>(
           loc, resultTy, input, reverse2, bias,
@@ -304,7 +303,7 @@ class TransposeConvStridedConverter
                    /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
                    /*stride=*/rewriter.getI64ArrayAttr({1, 1}),
                    /*dilation=*/rewriter.getI64ArrayAttr({1, 1}),
-                   op.quantization_info().getValue())
+                   *op.quantization_info())
                    .getResult();
     } else {
       conv2d = createOpAndInfer<tosa::Conv2DOp>(

diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index d7c306c91658b..ebf36627b6308 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -272,7 +272,7 @@ Attribute CombiningKindAttr::parse(AsmParser &parser, Type type) {
   if (failed(parser.parseGreater()))
     return {};
 
-  return CombiningKindAttr::get(kind.getValue(), parser.getContext());
+  return CombiningKindAttr::get(*kind, parser.getContext());
 }
 
 Attribute VectorDialect::parseAttribute(DialectAsmParser &parser,

diff --git a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
index cd61a9d5cc732..72e6c058c0334 100644
--- a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -303,7 +303,7 @@ ExecutionEngine::create(ModuleOp m, const ExecutionEngineOptions &options) {
   auto compileFunctionCreator = [&](JITTargetMachineBuilder jtmb)
       -> Expected<std::unique_ptr<IRCompileLayer::IRCompiler>> {
     if (options.jitCodeGenOptLevel)
-      jtmb.setCodeGenOptLevel(options.jitCodeGenOptLevel.getValue());
+      jtmb.setCodeGenOptLevel(*options.jitCodeGenOptLevel);
     auto tm = jtmb.createTargetMachine();
     if (!tm)
       return tm.takeError();

diff --git a/mlir/lib/ExecutionEngine/JitRunner.cpp b/mlir/lib/ExecutionEngine/JitRunner.cpp
index 827d9b21e9caa..53fdde6789f3d 100644
--- a/mlir/lib/ExecutionEngine/JitRunner.cpp
+++ b/mlir/lib/ExecutionEngine/JitRunner.cpp
@@ -148,8 +148,7 @@ static Error compileAndExecute(Options &options, ModuleOp module,
                                CompileAndExecuteConfig config, void **args) {
   Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel;
   if (auto clOptLevel = getCommandLineOptLevel(options))
-    jitCodeGenOptLevel =
-        static_cast<llvm::CodeGenOpt::Level>(clOptLevel.getValue());
+    jitCodeGenOptLevel = static_cast<llvm::CodeGenOpt::Level>(*clOptLevel);
 
   // If shared library implements custom mlir-runner library init and destroy
   // functions, we'll use them to register the library with the execution

diff --git a/mlir/lib/IR/AffineExpr.cpp b/mlir/lib/IR/AffineExpr.cpp
index 44bf53bc32b9b..5a19535a062fa 100644
--- a/mlir/lib/IR/AffineExpr.cpp
+++ b/mlir/lib/IR/AffineExpr.cpp
@@ -580,8 +580,7 @@ static AffineExpr simplifyAdd(AffineExpr lhs, AffineExpr rhs) {
   if (rLhsConst && rRhsConst && firstExpr == secondExpr)
     return getAffineBinaryOpExpr(
         AffineExprKind::Mul, firstExpr,
-        getAffineConstantExpr(rLhsConst.getValue() + rRhsConst.getValue(),
-                              lhs.getContext()));
+        getAffineConstantExpr(*rLhsConst + *rRhsConst, lhs.getContext()));
 
   // When doing successive additions, bring constant to the right: turn (d0 + 2)
   // + d1 into (d0 + d1) + 2.

diff --git a/mlir/lib/Parser/AttributeParser.cpp b/mlir/lib/Parser/AttributeParser.cpp
index 0df2f59bb5027..efc1c226a2aa0 100644
--- a/mlir/lib/Parser/AttributeParser.cpp
+++ b/mlir/lib/Parser/AttributeParser.cpp
@@ -325,7 +325,7 @@ Attribute Parser::parseFloatAttr(Type type, bool isNegative) {
   if (!type.isa<FloatType>())
     return (emitError("floating point value not valid for specified type"),
             nullptr);
-  return FloatAttr::get(type, isNegative ? -val.getValue() : val.getValue());
+  return FloatAttr::get(type, isNegative ? -*val : *val);
 }
 
 /// Construct an APint from a parsed value, a known attribute type and
@@ -696,7 +696,7 @@ DenseElementsAttr TensorLiteralParser::getHexAttr(SMLoc loc,
   }
 
   std::string data;
-  if (parseElementAttrHexValues(p, hexStorage.getValue(), data))
+  if (parseElementAttrHexValues(p, *hexStorage, data))
     return nullptr;
 
   ArrayRef<char> rawData(data.data(), data.size());

diff --git a/mlir/lib/Parser/Parser.cpp b/mlir/lib/Parser/Parser.cpp
index 551535e189613..170d571c26929 100644
--- a/mlir/lib/Parser/Parser.cpp
+++ b/mlir/lib/Parser/Parser.cpp
@@ -776,7 +776,7 @@ ParseResult OperationParser::parseSSAUse(UnresolvedOperand &result,
       return emitError("result number not allowed in argument list");
 
     if (auto value = getToken().getHashIdentifierNumber())
-      result.number = value.getValue();
+      result.number = *value;
     else
       return emitError("invalid SSA value result number");
     consumeToken(Token::hash_identifier);

diff --git a/mlir/lib/Parser/TypeParser.cpp b/mlir/lib/Parser/TypeParser.cpp
index a391e894d7ebf..52fae4b50d03e 100644
--- a/mlir/lib/Parser/TypeParser.cpp
+++ b/mlir/lib/Parser/TypeParser.cpp
@@ -161,7 +161,7 @@ ParseResult Parser::parseStridedLayout(int64_t &offset,
   bool question = getToken().is(Token::question);
   if (!maybeOffset && !question)
     return emitWrongTokenError("invalid offset");
-  offset = maybeOffset ? static_cast<int64_t>(maybeOffset.getValue())
+  offset = maybeOffset ? static_cast<int64_t>(*maybeOffset)
                        : MemRefType::getDynamicStrideOrOffset();
   consumeToken();
 
@@ -323,7 +323,7 @@ Type Parser::parseNonFunctionType() {
       signSemantics = *signedness ? IntegerType::Signed : IntegerType::Unsigned;
 
     consumeToken(Token::inttype);
-    return IntegerType::get(getContext(), width.getValue(), signSemantics);
+    return IntegerType::get(getContext(), *width, signSemantics);
   }
 
   // float-type
@@ -590,7 +590,7 @@ ParseResult Parser::parseIntegerInDimensionList(int64_t &value) {
     if (!dimension ||
         *dimension > (uint64_t)std::numeric_limits<int64_t>::max())
       return emitError("invalid dimension");
-    value = (int64_t)dimension.getValue();
+    value = (int64_t)*dimension;
     consumeToken(Token::integer);
   }
   return success();

diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 4583ad2240d83..d0cb2d3562d39 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -536,7 +536,7 @@ convertOmpOrdered(Operation &opInst, llvm::IRBuilderBase &builder,
 
   omp::ClauseDepend dependType = *orderedOp.depend_type_val();
   bool isDependSource = dependType == omp::ClauseDepend::dependsource;
-  unsigned numLoops = orderedOp.num_loops_val().getValue();
+  unsigned numLoops = *orderedOp.num_loops_val();
   SmallVector<llvm::Value *> vecValues =
       moduleTranslation.lookupValues(orderedOp.depend_vec_vars());
 

diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 0be382878083d..923cffc1caf11 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -1005,7 +1005,7 @@ LogicalResult ModuleTranslation::createAliasScopeMetadata() {
       llvm::SmallVector<llvm::Metadata *, 2> operands;
       operands.push_back({}); // Placeholder for self-reference
       if (Optional<StringRef> description = op.getDescription())
-        operands.push_back(llvm::MDString::get(ctx, description.getValue()));
+        operands.push_back(llvm::MDString::get(ctx, *description));
       llvm::MDNode *domain = llvm::MDNode::get(ctx, operands);
       domain->replaceOperandWith(0, domain); // Self-reference for uniqueness
       aliasScopeDomainMetadataMapping.insert({op, domain});
@@ -1024,7 +1024,7 @@ LogicalResult ModuleTranslation::createAliasScopeMetadata() {
       operands.push_back({}); // Placeholder for self-reference
       operands.push_back(domain);
       if (Optional<StringRef> description = op.getDescription())
-        operands.push_back(llvm::MDString::get(ctx, description.getValue()));
+        operands.push_back(llvm::MDString::get(ctx, *description));
       llvm::MDNode *scope = llvm::MDNode::get(ctx, operands);
       scope->replaceOperandWith(0, scope); // Self-reference for uniqueness
       aliasScopeMetadataMapping.insert({op, scope});

diff --git a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
index d0e7f7780aeed..cdf7fc584dec2 100644
--- a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
+++ b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
@@ -143,7 +143,7 @@ Serializer::processSpecConstantOperationOp(spirv::SpecConstantOperationOp op) {
     return failure();
   }
 
-  operands.push_back(static_cast<uint32_t>(enclosedOpcode.getValue()));
+  operands.push_back(static_cast<uint32_t>(*enclosedOpcode));
 
   // Append operands to the enclosed op to the list of operands.
   for (Value operand : enclosedOp.getOperands()) {
@@ -332,7 +332,7 @@ Serializer::processGlobalVariableOp(spirv::GlobalVariableOp varOp) {
 
   // Encode initialization.
   if (auto initializer = varOp.initializer()) {
-    auto initializerID = getVariableID(initializer.getValue());
+    auto initializerID = getVariableID(*initializer);
     if (!initializerID) {
       return emitError(varOp.getLoc(),
                        "invalid usage of undefined variable as initializer");

diff --git a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
index 740582125998d..942650b0caa41 100644
--- a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
+++ b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
@@ -210,7 +210,7 @@ LogicalResult Serializer::processDecoration(Location loc, uint32_t resultID,
            << attrName;
   }
   SmallVector<uint32_t, 1> args;
-  switch (decoration.getValue()) {
+  switch (*decoration) {
   case spirv::Decoration::Binding:
   case spirv::Decoration::DescriptorSet:
   case spirv::Decoration::Location:
@@ -223,7 +223,7 @@ LogicalResult Serializer::processDecoration(Location loc, uint32_t resultID,
     if (auto strAttr = attr.getValue().dyn_cast<StringAttr>()) {
       auto enumVal = spirv::symbolizeBuiltIn(strAttr.getValue());
       if (enumVal) {
-        args.push_back(static_cast<uint32_t>(enumVal.getValue()));
+        args.push_back(static_cast<uint32_t>(*enumVal));
         break;
       }
       return emitError(loc, "invalid ")
@@ -244,7 +244,7 @@ LogicalResult Serializer::processDecoration(Location loc, uint32_t resultID,
   default:
     return emitError(loc, "unhandled decoration ") << decorationName;
   }
-  return emitDecoration(resultID, decoration.getValue(), args);
+  return emitDecoration(resultID, *decoration, args);
 }
 
 LogicalResult Serializer::processName(uint32_t resultID, StringRef name) {

diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
index 6582be4de4e08..e205bf51b40f2 100644
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -1337,7 +1337,7 @@ FailureOr<Block *> ConversionPatternRewriterImpl::convertBlockSignature(
                                                  argReplacements);
   if (failed(result))
     return failure();
-  if (Block *newBlock = result.getValue()) {
+  if (Block *newBlock = *result) {
     if (newBlock != block)
       blockActions.push_back(BlockAction::getTypeConversion(newBlock));
   }
@@ -3044,7 +3044,7 @@ Value TypeConverter::materializeConversion(
     OpBuilder &builder, Location loc, Type resultType, ValueRange inputs) {
   for (MaterializationCallbackFn &fn : llvm::reverse(materializations))
     if (Optional<Value> result = fn(builder, resultType, inputs, loc))
-      return result.getValue();
+      return *result;
   return nullptr;
 }
 

diff --git a/mlir/test/lib/Dialect/Test/TestTypes.h b/mlir/test/lib/Dialect/Test/TestTypes.h
index d7b1163f7b39d..bd6421d04735e 100644
--- a/mlir/test/lib/Dialect/Test/TestTypes.h
+++ b/mlir/test/lib/Dialect/Test/TestTypes.h
@@ -61,7 +61,7 @@ struct FieldParser<test::CustomParam> {
     auto value = FieldParser<int>::parse(parser);
     if (failed(value))
       return failure();
-    return test::CustomParam{value.getValue()};
+    return test::CustomParam{*value};
   }
 };
 

diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
index 99d356381c907..9140c53c41dfa 100644
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
@@ -419,7 +419,7 @@ findTypeValue(StringRef typeVar, SmallVectorImpl<LinalgOperandDef> &args) {
         it.value().kind != LinalgOperandDefKind::Scalar &&
         it.value().kind != LinalgOperandDefKind::OutputTensor)
       continue;
-    if (it.value().typeVar.getValue() == typeVar)
+    if (*it.value().typeVar == typeVar)
       return llvm::formatv("block.getArgument({0}).getType()", it.index())
           .str();
   }

diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
index ce0a9af40bf99..212fe0e1204e5 100644
--- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
@@ -1043,7 +1043,7 @@ static void genEnumAttrParser(const NamedAttribute *var, MethodBody &body,
   {
     llvm::raw_string_ostream os(attrBuilderStr);
     os << tgfmt(enumAttr.getConstBuilderTemplate(), &attrTypeCtx,
-                "attrOptional.getValue()");
+                "attrOptional.value()");
   }
 
   // Build a string containing the cases that can be formatted as a keyword.

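For reference, a minimal standalone sketch (not part of the commit) of the substitution pattern applied throughout the diff above. It uses std::optional as a stand-in, on the assumption that llvm::Optional exposes the same operator* and value() accessors as the replacements shown; all names in the sketch are illustrative only.

    // Illustration only: std::optional stands in for llvm::Optional here.
    #include <iostream>
    #include <optional>

    int main() {
      std::optional<unsigned> width = 32;

      // Old spelling targeted by this commit:
      //   unsigned w = width.getValue();
      // New spelling: dereference the engaged optional directly.
      unsigned w = *width;

      // Where a member call is needed (as in the OpFormatGen.cpp hunk),
      // value() is the replacement spelling.
      unsigned v = width.value();

      std::cout << w + v << "\n"; // prints 64
      return 0;
    }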