[Mlir-commits] [mlir] cbb0981 - [mlir] llvm::Optional::value => operator*/operator->
Fangrui Song
llvmlistbot at llvm.org
Sat Dec 17 11:07:45 PST 2022
Author: Fangrui Song
Date: 2022-12-17T19:07:38Z
New Revision: cbb0981388b6b2c4ccc574a674841ecd517115e5
URL: https://github.com/llvm/llvm-project/commit/cbb0981388b6b2c4ccc574a674841ecd517115e5
DIFF: https://github.com/llvm/llvm-project/commit/cbb0981388b6b2c4ccc574a674841ecd517115e5.diff
LOG: [mlir] llvm::Optional::value => operator*/operator->
std::optional::value() has undesired exception-checking semantics and is
unavailable in older Xcode (see _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS). Such
call sites block the llvm::Optional => std::optional migration.
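The diff below applies the same pattern mechanically across the listed files:
once an Optional's presence has been established, dereference it with
operator* or operator-> instead of calling value(). As a minimal,
self-contained sketch of the before/after shape (the lengthOrZero helper and
its use of std::optional are illustrative assumptions, not code taken from the
MLIR sources):

#include <cassert>
#include <cstddef>
#include <optional>
#include <string>

// Hypothetical helper showing the migration pattern: check presence
// explicitly, then dereference with operator-> (or operator*) rather than
// calling value().
static std::size_t lengthOrZero(const std::optional<std::string> &name) {
  if (!name.has_value())
    return 0;
  // Before: name.value().size() -- value() throws on an empty optional and is
  // gated by _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS on older Xcode SDKs.
  // After: the checked dereference below has no such dependency.
  return name->size();
}

int main() {
  std::optional<std::string> present("mlir");
  std::optional<std::string> absent;
  assert(lengthOrZero(present) == 4);
  assert(lengthOrZero(absent) == 0);
  return 0;
}

The dereference is only well-defined after a presence check; the call sites
touched below already perform one (typically via has_value(), failed(), or the
optional's boolean conversion), so the rewrite does not change behavior.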
Added:
Modified:
mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
mlir/include/mlir/IR/OpDefinition.h
mlir/include/mlir/Transforms/DialectConversion.h
mlir/lib/AsmParser/AffineParser.cpp
mlir/lib/AsmParser/LocationParser.cpp
mlir/lib/AsmParser/Parser.cpp
mlir/lib/AsmParser/TypeParser.cpp
mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
mlir/lib/Dialect/Affine/Analysis/Utils.cpp
mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
mlir/lib/Dialect/Affine/Utils/Utils.cpp
mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp
mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp
mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
mlir/test/lib/Dialect/Shape/TestShapeMappingAnalysis.cpp
mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h b/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
index d6cd025d007d4..cd23e1234833d 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
@@ -413,7 +413,7 @@ class FlatAffineValueConstraints : public presburger::IntegerPolyhedron {
inline Value getValue(unsigned pos) const {
assert(pos < getNumDimAndSymbolVars() && "Invalid position");
assert(hasValue(pos) && "variable's Value not set");
- return values[pos].value();
+ return *values[pos];
}
/// Returns true if the pos^th variable has an associated Value.
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h
index 268f2e456c995..002bec3df3746 100644
--- a/mlir/include/mlir/IR/OpDefinition.h
+++ b/mlir/include/mlir/IR/OpDefinition.h
@@ -47,7 +47,7 @@ class OptionalParseResult {
bool has_value() const { return impl.has_value(); }
/// Access the internal ParseResult value.
- ParseResult value() const { return impl.value(); }
+ ParseResult value() const { return *impl; }
ParseResult operator*() const { return value(); }
private:
diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h
index 343d8db8b2393..871f4b0251fe1 100644
--- a/mlir/include/mlir/Transforms/DialectConversion.h
+++ b/mlir/include/mlir/Transforms/DialectConversion.h
@@ -252,9 +252,9 @@ class TypeConverter {
[callback = std::forward<FnT>(callback)](
T type, SmallVectorImpl<Type> &results, ArrayRef<Type>) {
if (Optional<Type> resultOpt = callback(type)) {
- bool wasSuccess = static_cast<bool>(resultOpt.value());
+ bool wasSuccess = static_cast<bool>(*resultOpt);
if (wasSuccess)
- results.push_back(resultOpt.value());
+ results.push_back(*resultOpt);
return Optional<LogicalResult>(success(wasSuccess));
}
return Optional<LogicalResult>();
diff --git a/mlir/lib/AsmParser/AffineParser.cpp b/mlir/lib/AsmParser/AffineParser.cpp
index c6af9ee50db15..82433beb1dc67 100644
--- a/mlir/lib/AsmParser/AffineParser.cpp
+++ b/mlir/lib/AsmParser/AffineParser.cpp
@@ -332,11 +332,11 @@ AffineExpr AffineParser::parseSymbolSSAIdExpr() {
/// affine-expr ::= integer-literal
AffineExpr AffineParser::parseIntegerExpr() {
auto val = getToken().getUInt64IntegerValue();
- if (!val.has_value() || (int64_t)val.value() < 0)
+ if (!val.has_value() || (int64_t)*val < 0)
return emitError("constant too large for index"), nullptr;
consumeToken(Token::integer);
- return builder.getAffineConstantExpr((int64_t)val.value());
+ return builder.getAffineConstantExpr((int64_t)*val);
}
/// Parses an expression that can be a valid operand of an affine expression.
diff --git a/mlir/lib/AsmParser/LocationParser.cpp b/mlir/lib/AsmParser/LocationParser.cpp
index 02cb3ed22c9bb..61b20179800c6 100644
--- a/mlir/lib/AsmParser/LocationParser.cpp
+++ b/mlir/lib/AsmParser/LocationParser.cpp
@@ -122,7 +122,7 @@ ParseResult Parser::parseNameOrFileLineColLocation(LocationAttr &loc) {
return emitError("expected integer column number in FileLineColLoc");
consumeToken(Token::integer);
- loc = FileLineColLoc::get(ctx, str, line.value(), column.value());
+ loc = FileLineColLoc::get(ctx, str, *line, *column);
return success();
}
diff --git a/mlir/lib/AsmParser/Parser.cpp b/mlir/lib/AsmParser/Parser.cpp
index 6fb32de37b373..864ed2d5ca336 100644
--- a/mlir/lib/AsmParser/Parser.cpp
+++ b/mlir/lib/AsmParser/Parser.cpp
@@ -2057,7 +2057,7 @@ ParseResult OperationParser::parseRegionBody(Region &region, SMLoc startLoc,
<< "previously referenced here";
}
Location loc = entryArg.sourceLoc.has_value()
- ? entryArg.sourceLoc.value()
+ ? *entryArg.sourceLoc
: getEncodedSourceLocation(argInfo.location);
BlockArgument arg = block->addArgument(entryArg.type, loc);
diff --git a/mlir/lib/AsmParser/TypeParser.cpp b/mlir/lib/AsmParser/TypeParser.cpp
index 259572247dcc8..38ae3ffc078dc 100644
--- a/mlir/lib/AsmParser/TypeParser.cpp
+++ b/mlir/lib/AsmParser/TypeParser.cpp
@@ -273,7 +273,7 @@ Type Parser::parseNonFunctionType() {
auto width = getToken().getIntTypeBitwidth();
if (!width.has_value())
return (emitError("invalid integer width"), nullptr);
- if (width.value() > IntegerType::kMaxWidth) {
+ if (*width > IntegerType::kMaxWidth) {
emitError(getToken().getLoc(), "integer bitwidth is limited to ")
<< IntegerType::kMaxWidth << " bits";
return nullptr;
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index ebc63bdff3dca..7cb72f4b97612 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -1453,7 +1453,7 @@ class GenericResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
SmallVector<AffineMap, 2> affineMaps = {
rewriter.getMultiDimIdentityMap(resultTy.getRank())};
auto emptyTensor = b.create<tensor::EmptyOp>(resultTy.getShape(), resultETy,
- dynamicDimsOr.value());
+ *dynamicDimsOr);
auto genericOp = b.create<linalg::GenericOp>(
resultTy, ValueRange({}), ValueRange{emptyTensor}, affineMaps,
getNParallelLoopsAttrs(resultTy.getRank()));
@@ -2051,7 +2051,7 @@ class GatherConverter : public OpConversionPattern<tosa::GatherOp> {
if (!dynamicDimsOr.has_value())
return rewriter.notifyMatchFailure(
op, "tosa.gather currently only supports dynamic batch dimensions");
- SmallVector<Value> dynamicDims = dynamicDimsOr.value();
+ SmallVector<Value> dynamicDims = *dynamicDimsOr;
auto resultElementTy = resultTy.getElementType();
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
index 89cfd7c5a04cc..287f841658e53 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -695,7 +695,7 @@ class MaxPool2dConverter : public OpRewritePattern<tosa::MaxPool2dOp> {
checkHasDynamicBatchDims(rewriter, op, {input, op.getOutput()});
if (!dynamicDimsOr.has_value())
return failure();
- SmallVector<Value> dynamicDims = dynamicDimsOr.value();
+ SmallVector<Value> dynamicDims = *dynamicDimsOr;
// Determine what the initial value needs to be for the max pool op.
Attribute initialAttr;
@@ -772,7 +772,7 @@ class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
checkHasDynamicBatchDims(rewriter, op, {input, op.getOutput()});
if (!dynamicDimsOr.has_value())
return failure();
- SmallVector<Value> dynamicDims = dynamicDimsOr.value();
+ SmallVector<Value> dynamicDims = *dynamicDimsOr;
// Apply padding as necessary.
llvm::SmallVector<int64_t> pad;
diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
index f7eb550a3e15a..82ec921497540 100644
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -98,9 +98,8 @@ static void getXferIndices(OpBuilder &b, OpTy xferOp, Value iv,
if (!isBroadcast) {
AffineExpr d0, d1;
bindDims(xferOp.getContext(), d0, d1);
- Value offset = adaptor.getIndices()[dim.value()];
- indices[dim.value()] =
- makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
+ Value offset = adaptor.getIndices()[*dim];
+ indices[*dim] = makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
}
}
diff --git a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
index c681f2dc56876..d7732b84946d1 100644
--- a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
+++ b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
@@ -103,7 +103,7 @@ static bool staticallyOutOfBounds(OpType op) {
Optional<uint32_t> idxVal = getConstantUint32(idx);
if (!idxVal)
return false;
- indexVal += stride * idxVal.value();
+ indexVal += stride * *idxVal;
}
result += indexVal;
if (result > std::numeric_limits<uint32_t>::max())
diff --git a/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
index 39f3a1d2a4e8b..453f20c4b4106 100644
--- a/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
@@ -1036,16 +1036,15 @@ void FlatAffineValueConstraints::getSliceBounds(
auto ubConst = getConstantBound64(BoundType::UB, pos);
if (lbConst.has_value() && ubConst.has_value()) {
// Detect equality to a constant.
- if (lbConst.value() == ubConst.value()) {
- memo[pos] = getAffineConstantExpr(lbConst.value(), context);
+ if (*lbConst == *ubConst) {
+ memo[pos] = getAffineConstantExpr(*lbConst, context);
changed = true;
continue;
}
// Detect an variable as modulo of another variable w.r.t a
// constant.
- if (detectAsMod(*this, pos, lbConst.value(), ubConst.value(), memo,
- context)) {
+ if (detectAsMod(*this, pos, *lbConst, *ubConst, memo, context)) {
changed = true;
continue;
}
@@ -1146,9 +1145,8 @@ void FlatAffineValueConstraints::getSliceBounds(
<< "WARNING: Potentially over-approximating slice lb\n");
auto lbConst = getConstantBound64(BoundType::LB, pos + offset);
if (lbConst.has_value()) {
- lbMap =
- AffineMap::get(numMapDims, numMapSymbols,
- getAffineConstantExpr(lbConst.value(), context));
+ lbMap = AffineMap::get(numMapDims, numMapSymbols,
+ getAffineConstantExpr(*lbConst, context));
}
}
if (!ubMap || ubMap.getNumResults() > 1) {
@@ -1158,7 +1156,7 @@ void FlatAffineValueConstraints::getSliceBounds(
if (ubConst.has_value()) {
ubMap = AffineMap::get(
numMapDims, numMapSymbols,
- getAffineConstantExpr(ubConst.value() + ubAdjustment, context));
+ getAffineConstantExpr(*ubConst + ubAdjustment, context));
}
}
}
@@ -1698,12 +1696,12 @@ void FlatAffineRelation::compose(const FlatAffineRelation &other) {
// Add and match domain of `rel` to domain of `this`.
for (unsigned i = 0, e = rel.getNumDomainDims(); i < e; ++i)
if (relMaybeValues[i].has_value())
- setValue(i, relMaybeValues[i].value());
+ setValue(i, *relMaybeValues[i]);
// Add and match range of `this` to range of `rel`.
for (unsigned i = 0, e = getNumRangeDims(); i < e; ++i) {
unsigned rangeIdx = rel.getNumDomainDims() + i;
if (thisMaybeValues[rangeIdx].has_value())
- rel.setValue(rangeIdx, thisMaybeValues[rangeIdx].value());
+ rel.setValue(rangeIdx, *thisMaybeValues[rangeIdx]);
}
// Append `this` to `rel` and simplify constraints.
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index babaab31006c1..10d7426cd2b38 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -96,8 +96,8 @@ Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
for (auto resultExpr : map.getResults()) {
if (auto constExpr = resultExpr.dyn_cast<AffineConstantExpr>()) {
if (tripCount.has_value())
- tripCount = std::min(tripCount.value(),
- static_cast<uint64_t>(constExpr.getValue()));
+ tripCount =
+ std::min(*tripCount, static_cast<uint64_t>(constExpr.getValue()));
else
tripCount = constExpr.getValue();
} else
diff --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
index bbcf0e9fde971..47e26e0c562e3 100644
--- a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
@@ -1010,7 +1010,7 @@ bool mlir::buildSliceTripCountMap(
}
Optional<uint64_t> maybeConstTripCount = getConstantTripCount(forOp);
if (maybeConstTripCount.has_value()) {
- (*tripCountMap)[op] = maybeConstTripCount.value();
+ (*tripCountMap)[op] = *maybeConstTripCount;
continue;
}
return false;
@@ -1019,7 +1019,7 @@ bool mlir::buildSliceTripCountMap(
// Slice bounds are created with a constant ub - lb difference.
if (!tripCount.has_value())
return false;
- (*tripCountMap)[op] = tripCount.value();
+ (*tripCountMap)[op] = *tripCount;
}
return true;
}
@@ -1319,7 +1319,7 @@ static Optional<int64_t> getMemoryFootprintBytes(Block &block,
Optional<int64_t> size = region.second->getRegionSize();
if (!size.has_value())
return std::nullopt;
- totalSizeInBytes += size.value();
+ totalSizeInBytes += *size;
}
return totalSizeInBytes;
}
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
index 6efef00eb2d8e..43b1637c4d48e 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
@@ -149,8 +149,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
getMemoryFootprintBytes(forOp,
/*memorySpace=*/0);
return (footprint.has_value() &&
- static_cast<uint64_t>(footprint.value()) >
- fastMemCapacityBytes);
+ static_cast<uint64_t>(*footprint) > fastMemCapacityBytes);
};
// If the memory footprint of the 'affine.for' loop is higher than fast
diff --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
index bb626e2ad8e2c..963602b57d7b8 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
@@ -405,12 +405,10 @@ checkTilingLegalityImpl(MutableArrayRef<mlir::AffineForOp> origLoops) {
for (unsigned k = 0, e = depComps.size(); k < e; k++) {
DependenceComponent depComp = depComps[k];
if (depComp.lb.has_value() && depComp.ub.has_value() &&
- depComp.lb.value() < depComp.ub.value() &&
- depComp.ub.value() < 0) {
+ *depComp.lb < *depComp.ub && *depComp.ub < 0) {
LLVM_DEBUG(llvm::dbgs()
- << "Dependence component lb = "
- << Twine(depComp.lb.value())
- << " ub = " << Twine(depComp.ub.value())
+ << "Dependence component lb = " << Twine(*depComp.lb)
+ << " ub = " << Twine(*depComp.ub)
<< " is negative at depth: " << Twine(d)
<< " and thus violates the legality rule.\n");
return false;
@@ -801,11 +799,11 @@ constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
newLoops[width + i].setStep(origLoops[i].getStep());
// Set the upper bound.
- if (mayBeConstantCount && mayBeConstantCount.value() < tileSizes[i]) {
+ if (mayBeConstantCount && *mayBeConstantCount < tileSizes[i]) {
// Trip count is less than the tile size: upper bound is lower bound +
// trip count * stepSize.
- AffineMap ubMap = b.getSingleDimShiftAffineMap(
- mayBeConstantCount.value() * origLoops[i].getStep());
+ AffineMap ubMap = b.getSingleDimShiftAffineMap(*mayBeConstantCount *
+ origLoops[i].getStep());
newLoops[width + i].setUpperBound(
/*operands=*/newLoops[i].getInductionVar(), ubMap);
} else if (largestDiv % tileSizes[i] != 0) {
@@ -974,7 +972,7 @@ void mlir::getTileableBands(func::FuncOp f,
LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (mayBeConstantTripCount.has_value()) {
- uint64_t tripCount = mayBeConstantTripCount.value();
+ uint64_t tripCount = *mayBeConstantTripCount;
if (tripCount == 0)
return success();
if (tripCount == 1)
@@ -990,8 +988,8 @@ LogicalResult mlir::loopUnrollUpToFactor(AffineForOp forOp,
uint64_t unrollFactor) {
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (mayBeConstantTripCount.has_value() &&
- mayBeConstantTripCount.value() < unrollFactor)
- return loopUnrollByFactor(forOp, mayBeConstantTripCount.value());
+ *mayBeConstantTripCount < unrollFactor)
+ return loopUnrollByFactor(forOp, *mayBeConstantTripCount);
return loopUnrollByFactor(forOp, unrollFactor);
}
@@ -1159,8 +1157,8 @@ LogicalResult mlir::loopUnrollJamUpToFactor(AffineForOp forOp,
uint64_t unrollJamFactor) {
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (mayBeConstantTripCount.has_value() &&
- mayBeConstantTripCount.value() < unrollJamFactor)
- return loopUnrollJamByFactor(forOp, mayBeConstantTripCount.value());
+ *mayBeConstantTripCount < unrollJamFactor)
+ return loopUnrollJamByFactor(forOp, *mayBeConstantTripCount);
return loopUnrollJamByFactor(forOp, unrollJamFactor);
}
@@ -1582,7 +1580,7 @@ AffineForOp mlir::sinkSequentialLoops(AffineForOp forOp) {
for (unsigned j = 0; j < maxLoopDepth; ++j) {
DependenceComponent &depComp = depComps[j];
assert(depComp.lb.has_value() && depComp.ub.has_value());
- if (depComp.lb.value() != 0 || depComp.ub.value() != 0)
+ if (*depComp.lb != 0 || *depComp.ub != 0)
isParallelLoop[j] = false;
}
}
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index eb7224fbdc985..9359fde079e99 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -1796,13 +1796,13 @@ MemRefType mlir::normalizeMemRefType(MemRefType memrefType,
// always bounded. However, when we have symbols, we may not be able to
// obtain a constant upper bound. Also, mapping to a negative space is
// invalid for normalization.
- if (!ubConst.has_value() || ubConst.value() < 0) {
+ if (!ubConst.has_value() || *ubConst < 0) {
LLVM_DEBUG(llvm::dbgs()
<< "can't normalize map due to unknown/invalid upper bound");
return memrefType;
}
// If dimension of new memrefType is dynamic, the value is -1.
- newShape[d] = ubConst.value() + 1;
+ newShape[d] = *ubConst + 1;
}
}
diff --git a/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp b/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp
index fd745bdad5008..6493ab68f016d 100644
--- a/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp
@@ -78,7 +78,7 @@ void gpu::SerializeToBlobPass::runOnOperation() {
if (!maybeTargetISA.has_value())
return signalPassFailure();
- std::string targetISA = std::move(maybeTargetISA.value());
+ std::string targetISA = std::move(*maybeTargetISA);
LLVM_DEBUG({
llvm::dbgs() << "ISA for module: " << getOperation().getNameAttr() << "\n";
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 33e7a7ce8aa3b..211080e11c35e 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -220,8 +220,7 @@ ParseResult AllocaOp::parse(OpAsmParser &parser, OperationState &result) {
Optional<NamedAttribute> alignmentAttr =
result.attributes.getNamed("alignment");
if (alignmentAttr.has_value()) {
- auto alignmentInt =
- alignmentAttr.value().getValue().dyn_cast<IntegerAttr>();
+ auto alignmentInt = alignmentAttr->getValue().dyn_cast<IntegerAttr>();
if (!alignmentInt)
return parser.emitError(parser.getNameLoc(),
"expected integer alignment");
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
index 603faf22cf2db..5e540979f58ed 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
@@ -298,7 +298,7 @@ struct BubbleUpPackOpThroughElemGenericOpPattern
auto genericOp = bubbleUpPackOpThroughElemGenericOp(rewriter, packOp);
if (failed(genericOp))
return failure();
- rewriter.replaceOp(packOp, genericOp.value().getResults());
+ rewriter.replaceOp(packOp, genericOp->getResults());
return success();
}
};
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 1f8d7af7e39c6..7f2b915731552 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -413,7 +413,7 @@ class FuseElementwiseOps : public OpRewritePattern<GenericOp> {
FailureOr<Operation *> fusedOp = fuseElementwiseOps(rewriter, &opOperand);
if (succeeded(fusedOp)) {
auto replacements =
- fusedOp.value()->getResults().take_back(genericOp.getNumResults());
+ (*fusedOp)->getResults().take_back(genericOp.getNumResults());
rewriter.replaceOp(genericOp, replacements);
return success();
}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 049f5cc0c62ac..fd41ed30ca9ab 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -235,7 +235,7 @@ FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
getConstantUpperBoundForIndex(materializedSize);
size = failed(upperBound)
? materializedSize
- : b.create<arith::ConstantIndexOp>(loc, upperBound.value());
+ : b.create<arith::ConstantIndexOp>(loc, *upperBound);
}
LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
fullSizes.push_back(size);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 8123b66fb5930..7ffc5864e5f51 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -650,7 +650,7 @@ linalg::tileReductionUsingForeachThread(RewriterBase &b,
// 2. Create the ForeachThreadOp with an empty region.
scf::ForeachThreadOp foreachThreadOp = b.create<scf::ForeachThreadOp>(
- loc, identityTensor.value()->getResults(),
+ loc, (*identityTensor)->getResults(),
ValueRange(materializedNonZeroNumThreads), mapping);
// 3. Calculate the tile offsets and sizes for the subsequent loop that will
@@ -768,7 +768,7 @@ linalg::tileReductionUsingForeachThread(RewriterBase &b,
// 8. Return.
ForeachThreadReductionTilingResult results;
- results.initialOp = identityTensor.value();
+ results.initialOp = *identityTensor;
results.loops = foreachThreadOp;
results.parallelTiledOp = tiledOp;
results.mergeOp = mergeOp;
diff --git a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
index 391164c76a07f..c1755ab2a3e9f 100644
--- a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
+++ b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
@@ -32,7 +32,7 @@ transform::MemRefMultiBufferOp::applyToOne(memref::AllocOp target,
return DiagnosedSilenceableFailure::silenceableFailure(std::move(diag));
}
- results.push_back(newBuffer.value());
+ results.push_back(*newBuffer);
return DiagnosedSilenceableFailure::success();
}
diff --git a/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp b/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp
index a44bfa46ca6d5..87cfe7d562c84 100644
--- a/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp
+++ b/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp
@@ -205,9 +205,8 @@ QuantDialectBytecodeInterface::readCalibratedQuantizedType(
llvm::APFloat::IEEEdouble())))
return reader.emitError("invalid CalibratedQuantizedType"),
CalibratedQuantizedType();
- return CalibratedQuantizedType::get(expressedType,
- min.value().convertToDouble(),
- max.value().convertToDouble());
+ return CalibratedQuantizedType::get(expressedType, min->convertToDouble(),
+ max->convertToDouble());
}
void QuantDialectBytecodeInterface::write(CalibratedQuantizedType type,
DialectBytecodeWriter &writer) const {
@@ -234,7 +233,7 @@ UniformQuantizedType QuantDialectBytecodeInterface::readUniformQuantizedType(
return reader.emitError("invalid UniformQuantizedType"),
UniformQuantizedType();
return UniformQuantizedType::get(flags, storageType, expressedType,
- scale.value().convertToDouble(), zeroPoint,
+ scale->convertToDouble(), zeroPoint,
storageTypeMin, storageTypeMax);
}
void QuantDialectBytecodeInterface::write(UniformQuantizedType type,
@@ -263,7 +262,7 @@ QuantDialectBytecodeInterface::readUniformQuantizedPerAxisType(
FailureOr<APFloat> fl =
reader.readAPFloatWithKnownSemantics(APFloat::IEEEdouble());
if (succeeded(fl)) {
- val = fl.value().convertToDouble();
+ val = fl->convertToDouble();
return success();
}
return failure();
diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
index 7fe9b4baf7659..9d39a1309a5fe 100644
--- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -379,7 +379,7 @@ mlir::scf::tileUsingSCFForOp(RewriterBase &rewriter, TilingInterface op,
innerMostLoop.getRegionIterArgs());
}
- tilingResult.replacements = replacementOr.value();
+ tilingResult.replacements = *replacementOr;
LLVM_DEBUG({
if (!tilingResult.loops.empty()) {
@@ -438,9 +438,8 @@ mlir::scf::tileReductionUsingScf(PatternRewriter &b,
// 3. Generate the tiled implementation within the inner most loop.
b.setInsertionPoint(loops.back().getBody()->getTerminator());
- Operation *parallelOp =
- op.tileToPartialReduction(b, loc, identityTensor.value()->getResults(),
- offsets, sizes, reductionDim);
+ Operation *parallelOp = op.tileToPartialReduction(
+ b, loc, (*identityTensor)->getResults(), offsets, sizes, reductionDim);
SmallVector<OpFoldResult> resultSizesList;
for (size_t i = 0; i < offsets.size(); i++)
@@ -448,8 +447,8 @@ mlir::scf::tileReductionUsingScf(PatternRewriter &b,
b.createOrFold<tensor::DimOp>(loc, parallelOp->getResult(0), i));
SmallVector<OpFoldResult> outOffsets(offsets.size(), b.getIndexAttr(0));
FailureOr<SmallVector<Value>> replacementOr = yieldTiledValues(
- b, identityTensor.value()->getResults(), parallelOp->getResults(),
- outOffsets, resultSizesList, loops);
+ b, (*identityTensor)->getResults(), parallelOp->getResults(), outOffsets,
+ resultSizesList, loops);
if (failed(replacementOr))
return b.notifyMatchFailure(op, "failed to yield replacement");
@@ -464,12 +463,11 @@ mlir::scf::tileReductionUsingScf(PatternRewriter &b,
// 4. Apply the merge reduction to combine all the partial values.
b.setInsertionPointAfter(*loops.begin());
- Operation *mergeOp =
- op.mergeReductions(b, loc, replacementOr.value(), reductionDim);
+ Operation *mergeOp = op.mergeReductions(b, loc, *replacementOr, reductionDim);
b.replaceOp(op, mergeOp->getResults());
SCFReductionTilingResult results;
- results.initialOp = identityTensor.value();
+ results.initialOp = *identityTensor;
results.loops = std::move(loops);
results.parallelTiledOp = parallelOp;
results.mergeOp = mergeOp;
@@ -574,7 +572,7 @@ mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp(
fusableProducer);
if (failed(fusedProducerValue))
continue;
- rewriter.replaceOp(candidateSliceOp, fusedProducerValue.value());
+ rewriter.replaceOp(candidateSliceOp, *fusedProducerValue);
// 2d. The operands of the fused producer might themselved be slices of
// values produced by operations that implement the `TilingInterface`.
@@ -646,8 +644,8 @@ mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp(
iterArgNumber.value(),
dstOp.getTiedOpOperand(fusableProducer)->get());
}
- if (auto dstOp = fusedProducerValue.value()
- .getDefiningOp<DestinationStyleOpInterface>()) {
+ if (auto dstOp = fusedProducerValue
+ ->getDefiningOp<DestinationStyleOpInterface>()) {
scf::ForOp innerMostLoop = tileAndFuseResult.loops.back();
updateDestinationOperandsForTiledOp(
rewriter, dstOp.getDpsInitOperand(resultNumber)->get(),
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
index 306434c0297ce..bca91e56e8f71 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
@@ -240,11 +240,11 @@ OpFoldResult spirv::LogicalAndOp::fold(ArrayRef<Attribute> operands) {
if (Optional<bool> rhs = getScalarOrSplatBoolAttr(operands.back())) {
// x && true = x
- if (rhs.value())
+ if (*rhs)
return getOperand1();
// x && false = false
- if (!rhs.value())
+ if (!*rhs)
return operands.back();
}
@@ -271,12 +271,12 @@ OpFoldResult spirv::LogicalOrOp::fold(ArrayRef<Attribute> operands) {
assert(operands.size() == 2 && "spirv.LogicalOr should take two operands");
if (auto rhs = getScalarOrSplatBoolAttr(operands.back())) {
- if (rhs.value())
+ if (*rhs)
// x || true = true
return operands.back();
// x || false = x
- if (!rhs.value())
+ if (!*rhs)
return getOperand1();
}
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
index d77f7b53a2804..04ebea7ed5403 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
@@ -143,8 +143,8 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp,
return funcOp.emitRemark("lower entry point failure: could not select "
"execution model based on 'spirv.target_env'");
- builder.create<spirv::EntryPointOp>(funcOp.getLoc(), executionModel.value(),
- funcOp, interfaceVars);
+ builder.create<spirv::EntryPointOp>(funcOp.getLoc(), *executionModel, funcOp,
+ interfaceVars);
// Specifies the spirv.ExecutionModeOp.
if (DenseI32ArrayAttr workgroupSizeAttr = entryPointAttr.getWorkgroupSize()) {
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index 8fbbf6aed6b66..4bc24aff0abc5 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -505,7 +505,7 @@ static bool computeIterationGraph(Merger &merger, linalg::GenericOp op,
// Filter loops should be constructed after all the dependent loops,
// i.e., d0 + d1 < filter_loop(d0 + d1)
- if (tldx && merger.isFilterLoop(tldx.value())) {
+ if (tldx && merger.isFilterLoop(*tldx)) {
assert(!ta.isa<AffineDimExpr>() &&
!isDenseDLT(getDimLevelType(enc, d)));
addAffineOrderings(adjM, inDegree, ta, AffineExpr(), std::nullopt,
@@ -1042,11 +1042,11 @@ static void genInvariants(Merger &merger, CodeGen &codegen, OpBuilder &builder,
for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) {
AffineExpr a = map.getResult(toOrigDim(enc, d));
Optional<unsigned> sldx = merger.getLoopIdx(t.getOperandNumber(), d);
- if (sldx && merger.isFilterLoop(sldx.value())) {
- if (!codegen.getLoopIdxValue(sldx.value()))
+ if (sldx && merger.isFilterLoop(*sldx)) {
+ if (!codegen.getLoopIdxValue(*sldx))
// The filter loops has not been constructed.
return;
- if (sldx.value() == ldx)
+ if (*sldx == ldx)
atLevel = true;
} else if (!isInvariantAffine(codegen, a, ldx, atLevel))
return; // still in play
@@ -1351,7 +1351,7 @@ static bool startLoopSeq(Merger &merger, CodeGen &codegen, OpBuilder &builder,
} else {
// sparse/singleton dim levels.
tids.push_back(tid);
- dims.push_back(dim.value());
+ dims.push_back(*dim);
}
});
@@ -1435,11 +1435,11 @@ static void translateBitsToTidDimPairs(
return;
}
condTids.push_back(tid);
- condDims.push_back(dim.value());
+ condDims.push_back(*dim);
} else if (isDenseDLT(dlt)) {
// TODO: get rid of extraTids and extraDims.
extraTids.push_back(tid);
- extraDims.push_back(dim.value());
+ extraDims.push_back(*dim);
} else {
assert(isUndefDLT(dlt));
if (tid >= op.getNumDpsInputs())
@@ -1680,7 +1680,7 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
if (!optExp.has_value())
return failure();
- unsigned exp = optExp.value();
+ unsigned exp = *optExp;
OpOperand *sparseOut = nullptr;
unsigned outerParNest = 0;
// Computes a topologically sorted iteration graph to ensure tensors
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
index bf6612021f92d..b9f8d26628af0 100644
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -1053,7 +1053,7 @@ Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
if (def->getNumOperands() == 1) {
auto x = buildTensorExp(op, def->getOperand(0));
if (x.has_value()) {
- unsigned e = x.value();
+ unsigned e = *x;
if (isa<math::AbsFOp>(def))
return addExp(kAbsF, e);
if (isa<complex::AbsOp>(def))
@@ -1132,8 +1132,8 @@ Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
auto x = buildTensorExp(op, def->getOperand(0));
auto y = buildTensorExp(op, def->getOperand(1));
if (x.has_value() && y.has_value()) {
- unsigned e0 = x.value();
- unsigned e1 = y.value();
+ unsigned e0 = *x;
+ unsigned e1 = *y;
if (isa<arith::MulFOp>(def))
return addExp(kMulF, e0, e1);
if (isa<complex::MulOp>(def))
@@ -1188,8 +1188,8 @@ Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
auto y = buildTensorExp(op, def->getOperand(1));
auto z = buildTensorExp(op, def->getOperand(2));
if (x.has_value() && y.has_value() && z.has_value()) {
- unsigned e0 = x.value();
- unsigned e1 = y.value();
+ unsigned e0 = *x;
+ unsigned e1 = *y;
if (auto redop = dyn_cast<sparse_tensor::ReduceOp>(def)) {
if (isAdmissibleBranch(redop, redop.getRegion()))
return addExp(kReduce, e0, e1, Value(), def);
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index a469574fc5265..99898f63e4959 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -3197,7 +3197,7 @@ void PackOp::build(OpBuilder &builder, OperationState &state, Value source,
SmallVector<Value> dynamicTileSizes;
dispatchIndexOpFoldResults(innerTiles, dynamicTileSizes, staticTileSizes);
build(builder, state, dest.getType(), source, dest,
- paddingValue ? paddingValue.value() : nullptr,
+ paddingValue ? *paddingValue : nullptr,
outerDimsPerm.empty() ? nullptr
: builder.getDenseI64ArrayAttr(outerDimsPerm),
builder.getDenseI64ArrayAttr(innerDimsPos), dynamicTileSizes,
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
index c476cd1325e59..46324672ad2f2 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -267,12 +267,12 @@ static UnpackTileDimInfo getUnpackTileDimInfo(OpBuilder &b, UnPackOp unpackOp,
getValueOrCreateConstantIndexOp(b, loc, tileSize));
std::optional<int64_t> cstInnerSize = getConstantIntValue(innerTileSize);
if (!failed(cstSize) && cstInnerSize) {
- if (cstSize.value() % cstInnerSize.value() == 0)
+ if (*cstSize % *cstInnerSize == 0)
info.isAlignedToInnerTileSize = true;
// If the tiling size equals to the inner tiling size, the outer dims are
// always 1.
- if (cstInnerSize.value() == cstSize.value()) {
+ if (*cstInnerSize == *cstSize) {
auto lhs = AV(dim0).bind(tileOffset);
auto rhs = AV(dim1).bind(innerTileSize);
info.sourceOffset = ab.floor(lhs, rhs);
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
index 67c949c706c09..fbae33ea00e31 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
@@ -210,6 +210,6 @@ tensor::simplifyCollapseShapeWithRankReducingExtractSlice(
return rewriter
.replaceOpWithNewOp<tensor::CollapseShapeOp>(
- op, sliceOp.getResult(), info->newReassociationIndices.value())
+ op, sliceOp.getResult(), *info->newReassociationIndices)
.getOperation();
}
diff --git a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
index 43c0cdc006b31..65176ed7b9e74 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
@@ -38,5 +38,5 @@ FailureOr<Value> tensor::replaceExtractSliceWithTiledProducer(
if (failed(tiledResult))
return failure();
- return tiledResult.value();
+ return *tiledResult;
}
diff --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
index cb5b93bf0d600..c0f55b35a0f84 100644
--- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
+++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
@@ -414,7 +414,7 @@ mlir::getSimplifyCollapseShapeWithRankReducingSliceInfo(
for (const auto &[nonUnitDim, indices] :
llvm::zip(*trivialSegments, reassociationIndices)) {
if (nonUnitDim) {
- sliceShape.push_back(sourceType.getDimSize(nonUnitDim.value()));
+ sliceShape.push_back(sourceType.getDimSize(*nonUnitDim));
continue;
}
llvm::append_range(sliceShape, llvm::map_range(indices, [&](int64_t idx) {
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 048c3296dfef4..e67f147399e59 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -557,7 +557,7 @@ class OuterProductOpLowering : public OpRewritePattern<vector::OuterProductOp> {
loc, op.getLhs(), b, acc, kind, rewriter, isInt);
if (!mult.has_value())
return failure();
- rewriter.replaceOp(op, mult.value());
+ rewriter.replaceOp(op, *mult);
return success();
}
@@ -575,8 +575,7 @@ class OuterProductOpLowering : public OpRewritePattern<vector::OuterProductOp> {
createContractArithOp(loc, a, op.getRhs(), r, kind, rewriter, isInt);
if (!m.has_value())
return failure();
- result = rewriter.create<vector::InsertOp>(loc, resType, m.value(),
- result, pos);
+ result = rewriter.create<vector::InsertOp>(loc, resType, *m, result, pos);
}
rewriter.replaceOp(op, result);
return success();
@@ -1878,7 +1877,7 @@ ContractionOpLowering::matchAndRewrite(vector::ContractionOp op,
auto newOp = lowerParallel(op, lhsIndex, rhsIndex, rewriter);
if (failed(newOp))
return failure();
- rewriter.replaceOp(op, newOp.value());
+ rewriter.replaceOp(op, *newOp);
return success();
}
@@ -1899,7 +1898,7 @@ ContractionOpLowering::matchAndRewrite(vector::ContractionOp op,
auto newOp = lowerParallel(op, lhsIndex, /*rhsIndex=*/-1, rewriter);
if (failed(newOp))
return failure();
- rewriter.replaceOp(op, newOp.value());
+ rewriter.replaceOp(op, *newOp);
return success();
}
}
@@ -1911,7 +1910,7 @@ ContractionOpLowering::matchAndRewrite(vector::ContractionOp op,
auto newOp = lowerParallel(op, /*lhsIndex=*/-1, rhsIndex, rewriter);
if (failed(newOp))
return failure();
- rewriter.replaceOp(op, newOp.value());
+ rewriter.replaceOp(op, *newOp);
return success();
}
}
@@ -1921,7 +1920,7 @@ ContractionOpLowering::matchAndRewrite(vector::ContractionOp op,
auto newOp = lowerReduction(op, rewriter);
if (failed(newOp))
return failure();
- rewriter.replaceOp(op, newOp.value());
+ rewriter.replaceOp(op, *newOp);
return success();
}
@@ -2021,8 +2020,8 @@ ContractionOpLowering::lowerReduction(vector::ContractionOp op,
return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
diag << "expected iterIndex=" << iterIndex << "to map to a RHS dimension";
});
- int64_t lhsIndex = lookupLhs.value();
- int64_t rhsIndex = lookupRhs.value();
+ int64_t lhsIndex = *lookupLhs;
+ int64_t rhsIndex = *lookupRhs;
int64_t dimSize = lhsType.getDimSize(lhsIndex);
if (dimSize != rhsType.getDimSize(rhsIndex))
return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index 77de573aeffee..c1a83e8ac7f93 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -702,7 +702,7 @@ GlobalOp Importer::processGlobal(llvm::GlobalVariable *globalVar) {
convertConstantExpr(globalVar->getInitializer());
if (failed(initializer))
return {};
- builder.create<ReturnOp>(globalOp.getLoc(), initializer.value());
+ builder.create<ReturnOp>(globalOp.getLoc(), *initializer);
}
if (globalVar->hasAtLeastLocalUnnamedAddr()) {
globalOp.setUnnamedAddr(
@@ -865,7 +865,7 @@ FailureOr<Value> Importer::convertConstantExpr(llvm::Constant *constant) {
FailureOr<Value> converted = convertConstant(constantToConvert);
if (failed(converted))
return failure();
- mapValue(constantToConvert, converted.value());
+ mapValue(constantToConvert, *converted);
}
// Update the constant insertion point and return the converted constant.
@@ -903,7 +903,7 @@ Importer::convertValues(ArrayRef<llvm::Value *> values) {
FailureOr<Value> converted = convertValue(value);
if (failed(converted))
return failure();
- remapped.push_back(converted.value());
+ remapped.push_back(*converted);
}
return remapped;
}
@@ -912,7 +912,7 @@ IntegerAttr Importer::matchIntegerAttr(llvm::Value *value) {
IntegerAttr integerAttr;
FailureOr<Value> converted = convertValue(value);
bool success = succeeded(converted) &&
- matchPattern(converted.value(), m_Constant(&integerAttr));
+ matchPattern(*converted, m_Constant(&integerAttr));
assert(success && "expected a constant value");
(void)success;
return integerAttr;
@@ -933,7 +933,7 @@ Importer::convertBranchArgs(llvm::Instruction *branch, llvm::BasicBlock *target,
FailureOr<Value> converted = convertValue(value);
if (failed(converted))
return failure();
- blockArguments.push_back(converted.value());
+ blockArguments.push_back(*converted);
}
return success();
}
@@ -949,13 +949,13 @@ Importer::convertCallTypeAndOperands(llvm::CallBase *callInst,
FailureOr<Value> called = convertValue(callInst->getCalledOperand());
if (failed(called))
return failure();
- operands.push_back(called.value());
+ operands.push_back(*called);
}
SmallVector<llvm::Value *> args(callInst->args());
FailureOr<SmallVector<Value>> arguments = convertValues(args);
if (failed(arguments))
return failure();
- llvm::append_range(operands, arguments.value());
+ llvm::append_range(operands, *arguments);
return success();
}
@@ -1004,7 +1004,7 @@ LogicalResult Importer::convertOperation(OpBuilder &odsBuilder,
FailureOr<Value> condition = convertValue(brInst->getCondition());
if (failed(condition))
return failure();
- builder.create<LLVM::CondBrOp>(loc, condition.value(), succBlocks.front(),
+ builder.create<LLVM::CondBrOp>(loc, *condition, succBlocks.front(),
succBlockArgs.front(), succBlocks.back(),
succBlockArgs.back());
} else {
@@ -1041,7 +1041,7 @@ LogicalResult Importer::convertOperation(OpBuilder &odsBuilder,
caseBlocks[it.index()] = lookupBlock(succBB);
}
- builder.create<SwitchOp>(loc, condition.value(), lookupBlock(defaultBB),
+ builder.create<SwitchOp>(loc, *condition, lookupBlock(defaultBB),
defaultBlockArgs, caseValues, caseBlocks,
caseOperandRefs);
return success();
@@ -1081,7 +1081,7 @@ LogicalResult Importer::convertOperation(OpBuilder &odsBuilder,
FailureOr<Value> operand = convertConstantExpr(lpInst->getClause(i));
if (failed(operand))
return failure();
- operands.push_back(operand.value());
+ operands.push_back(*operand);
}
Type type = convertType(lpInst->getType());
@@ -1136,13 +1136,12 @@ LogicalResult Importer::convertOperation(OpBuilder &odsBuilder,
FailureOr<Value> index = convertValue(operand);
if (failed(index))
return failure();
- indices.push_back(index.value());
+ indices.push_back(*index);
}
Type type = convertType(inst->getType());
- Value res =
- builder.create<GEPOp>(loc, type, sourceElementType, basePtr.value(),
- indices, gepInst->isInBounds());
+ Value res = builder.create<GEPOp>(loc, type, sourceElementType, *basePtr,
+ indices, gepInst->isInBounds());
mapValue(inst, res);
return success();
}
diff --git a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
index f8f1637891ddd..697121e380c00 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
@@ -310,7 +310,7 @@ convertCallLLVMIntrinsicOp(CallIntrinsicOp &op, llvm::IRBuilderBase &builder,
getOverloadedDeclaration(op, id, module, moduleTranslation);
if (failed(fnOrFailure))
return failure();
- fn = fnOrFailure.value();
+ fn = *fnOrFailure;
} else {
fn = llvm::Intrinsic::getDeclaration(module, id, {});
}
diff --git a/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp b/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
index d1bcb8dabeb85..7c384e1a48ccd 100644
--- a/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
+++ b/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
@@ -53,13 +53,13 @@ getDirectionVectorStr(bool ret, unsigned numCommonLoops, unsigned loopNestDepth,
for (const auto &dependenceComponent : dependenceComponents) {
std::string lbStr = "-inf";
if (dependenceComponent.lb.has_value() &&
- dependenceComponent.lb.value() != std::numeric_limits<int64_t>::min())
- lbStr = std::to_string(dependenceComponent.lb.value());
+ *dependenceComponent.lb != std::numeric_limits<int64_t>::min())
+ lbStr = std::to_string(*dependenceComponent.lb);
std::string ubStr = "+inf";
if (dependenceComponent.ub.has_value() &&
- dependenceComponent.ub.value() != std::numeric_limits<int64_t>::max())
- ubStr = std::to_string(dependenceComponent.ub.value());
+ *dependenceComponent.ub != std::numeric_limits<int64_t>::max())
+ ubStr = std::to_string(*dependenceComponent.ub);
result += "[" + lbStr + ", " + ubStr + "]";
}
diff --git a/mlir/test/lib/Dialect/Shape/TestShapeMappingAnalysis.cpp b/mlir/test/lib/Dialect/Shape/TestShapeMappingAnalysis.cpp
index f50988e3b3319..7310958db6f06 100644
--- a/mlir/test/lib/Dialect/Shape/TestShapeMappingAnalysis.cpp
+++ b/mlir/test/lib/Dialect/Shape/TestShapeMappingAnalysis.cpp
@@ -26,7 +26,7 @@ struct TestShapeMappingPass
llvm::Optional<std::reference_wrapper<shape::ShapeMappingAnalysis>>
maybeAnalysis = getCachedAnalysis<shape::ShapeMappingAnalysis>();
if (maybeAnalysis.has_value())
- maybeAnalysis.value().get().print(llvm::errs());
+ maybeAnalysis->get().print(llvm::errs());
else
llvm::errs() << "No cached ShapeMappingAnalysis existed.";
}
diff --git a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
index b8c67c380653e..382847d9a6b66 100644
--- a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
+++ b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
@@ -135,7 +135,7 @@ LinalgTransformationFilter::checkAndNotify(PatternRewriter &rewriter,
void LinalgTransformationFilter::replaceLinalgTransformationFilter(
PatternRewriter &rewriter, Operation *op) const {
if (replacement.has_value())
- op->setAttr(kLinalgTransformMarker, replacement.value());
+ op->setAttr(kLinalgTransformMarker, *replacement);
else
op->removeAttr(rewriter.getStringAttr(kLinalgTransformMarker));
}
diff --git a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
index 555835f2f6ba0..2297c55421342 100644
--- a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
+++ b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
@@ -256,7 +256,7 @@ static LogicalResult emitOneMLIRBuilder(const Record &record, raw_ostream &os,
as << formatv("if (failed(_llvmir_gen_operand_{0}))\n"
" return failure();\n",
name);
- bs << formatv("_llvmir_gen_operand_{0}.value()", name);
+ bs << formatv("*_llvmir_gen_operand_{0}", name);
}
} else if (isResultName(op, name)) {
if (op.getNumResults() != 1)