[Mlir-commits] [mlir] [mlir] Construct SmallVector with ArrayRef (NFC) (PR #101896)
Kazu Hirata
llvmlistbot at llvm.org
Sun Aug 4 09:28:08 PDT 2024
https://github.com/kazutakahirata created https://github.com/llvm/llvm-project/pull/101896
None
From ddda89b002d9f7c049f12315ecc9308f26774643 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Sun, 4 Aug 2024 09:10:55 -0700
Subject: [PATCH] [mlir] Construct SmallVector with ArrayRef (NFC)
---
.../Dialect/Vector/Transforms/VectorRewritePatterns.h | 2 +-
mlir/include/mlir/IR/DialectRegistry.h | 2 +-
mlir/include/mlir/TableGen/Class.h | 3 +--
mlir/lib/Analysis/Presburger/IntegerRelation.cpp | 2 +-
mlir/lib/Analysis/Presburger/Simplex.cpp | 4 ++--
mlir/lib/Analysis/Presburger/Utils.cpp | 2 +-
mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp | 2 +-
mlir/lib/Dialect/Affine/IR/AffineOps.cpp | 6 ++----
mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp | 4 ++--
mlir/lib/Dialect/Arith/Transforms/IntNarrowing.cpp | 3 +--
mlir/lib/Dialect/GPU/TransformOps/Utils.cpp | 3 +--
mlir/lib/Dialect/GPU/Transforms/NVVMAttachTarget.cpp | 2 +-
mlir/lib/Dialect/GPU/Transforms/ROCDLAttachTarget.cpp | 2 +-
.../Dialect/Linalg/TransformOps/LinalgTransformOps.cpp | 6 ++----
mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp | 3 +--
mlir/lib/Dialect/Linalg/Transforms/Loops.cpp | 4 ++--
mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp | 2 +-
.../Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp | 3 +--
mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp | 2 +-
mlir/lib/Dialect/Linalg/Utils/Utils.cpp | 3 +--
.../Math/Transforms/PolynomialApproximation.cpp | 2 +-
mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp | 2 +-
.../Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp | 10 +++++-----
mlir/lib/Dialect/SCF/Utils/Utils.cpp | 2 +-
mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp | 2 +-
.../SparseTensor/Transforms/SparseBufferRewriting.cpp | 3 +--
.../SparseTensor/Transforms/SparseVectorization.cpp | 4 ++--
mlir/lib/Dialect/Tensor/IR/TensorOps.cpp | 6 ++----
.../Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp | 9 ++++-----
mlir/lib/Dialect/Tensor/Utils/Utils.cpp | 3 +--
.../Transform/Interfaces/TransformInterfaces.cpp | 2 +-
mlir/lib/Dialect/Vector/IR/VectorOps.cpp | 5 ++---
mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp | 4 ++--
.../Dialect/Vector/Transforms/LowerVectorTransfer.cpp | 3 +--
.../lib/Dialect/Vector/Transforms/VectorDistribute.cpp | 9 +++------
mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp | 2 +-
mlir/lib/IR/AffineExpr.cpp | 3 +--
mlir/lib/IR/AffineMap.cpp | 7 +++----
mlir/lib/IR/OperationSupport.cpp | 2 +-
mlir/lib/Interfaces/FunctionInterfaces.cpp | 2 +-
mlir/lib/Target/LLVMIR/ModuleImport.cpp | 2 +-
mlir/lib/Transforms/Mem2Reg.cpp | 3 +--
mlir/lib/Transforms/SROA.cpp | 3 +--
mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp | 5 ++---
44 files changed, 65 insertions(+), 90 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h b/mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h
index 8e6d36f0b5f09..10970fd03e6eb 100644
--- a/mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h
+++ b/mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h
@@ -56,7 +56,7 @@ struct UnrollVectorOptions {
/// Set the native shape to use for unrolling.
UnrollVectorOptions &setNativeShape(ArrayRef<int64_t> shape) {
- SmallVector<int64_t> tsShape(shape.begin(), shape.end());
+ SmallVector<int64_t> tsShape(shape);
nativeShape = [=](Operation *) -> std::optional<SmallVector<int64_t>> {
return tsShape;
};
diff --git a/mlir/include/mlir/IR/DialectRegistry.h b/mlir/include/mlir/IR/DialectRegistry.h
index c13a1a1858eb1..8e394988119da 100644
--- a/mlir/include/mlir/IR/DialectRegistry.h
+++ b/mlir/include/mlir/IR/DialectRegistry.h
@@ -60,7 +60,7 @@ class DialectExtensionBase {
/// If the list is empty, the extension is invoked for every loaded dialect
/// independently.
DialectExtensionBase(ArrayRef<StringRef> dialectNames)
- : dialectNames(dialectNames.begin(), dialectNames.end()) {}
+ : dialectNames(dialectNames) {}
private:
/// The names of the dialects affected by this extension.
diff --git a/mlir/include/mlir/TableGen/Class.h b/mlir/include/mlir/TableGen/Class.h
index 92fec6a3b11d9..855952d19492d 100644
--- a/mlir/include/mlir/TableGen/Class.h
+++ b/mlir/include/mlir/TableGen/Class.h
@@ -129,8 +129,7 @@ class MethodSignature {
ArrayRef<MethodParameter> parameters)
: MethodSignature(std::forward<RetTypeT>(retType),
std::forward<NameT>(name),
- SmallVector<MethodParameter>(parameters.begin(),
- parameters.end())) {}
+ SmallVector<MethodParameter>(parameters)) {}
/// Create a method signature with a return type, a method name, and a
/// variadic list of parameters.
template <typename RetTypeT, typename NameT, typename... Parameters>
diff --git a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
index bdcb55251b104..87204d2100713 100644
--- a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
+++ b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
@@ -1515,7 +1515,7 @@ void IntegerRelation::addLocalFloorDiv(ArrayRef<DynamicAPInt> dividend,
appendVar(VarKind::Local);
- SmallVector<DynamicAPInt, 8> dividendCopy(dividend.begin(), dividend.end());
+ SmallVector<DynamicAPInt, 8> dividendCopy(dividend);
dividendCopy.insert(dividendCopy.end() - 1, DynamicAPInt(0));
addInequality(
getDivLowerBound(dividendCopy, divisor, dividendCopy.size() - 2));
diff --git a/mlir/lib/Analysis/Presburger/Simplex.cpp b/mlir/lib/Analysis/Presburger/Simplex.cpp
index 7c8a019557132..c78a0723a6c0f 100644
--- a/mlir/lib/Analysis/Presburger/Simplex.cpp
+++ b/mlir/lib/Analysis/Presburger/Simplex.cpp
@@ -1306,7 +1306,7 @@ void SimplexBase::addDivisionVariable(ArrayRef<DynamicAPInt> coeffs,
assert(denom > 0 && "Denominator must be positive!");
appendVariable();
- SmallVector<DynamicAPInt, 8> ineq(coeffs.begin(), coeffs.end());
+ SmallVector<DynamicAPInt, 8> ineq(coeffs);
DynamicAPInt constTerm = ineq.back();
ineq.back() = -denom;
ineq.emplace_back(constTerm);
@@ -1754,7 +1754,7 @@ class presburger::GBRSimplex {
getCoeffsForDirection(ArrayRef<DynamicAPInt> dir) {
assert(2 * dir.size() == simplex.getNumVariables() &&
"Direction vector has wrong dimensionality");
- SmallVector<DynamicAPInt, 8> coeffs(dir.begin(), dir.end());
+ SmallVector<DynamicAPInt, 8> coeffs(dir);
coeffs.reserve(dir.size() + 1);
for (const DynamicAPInt &coeff : dir)
coeffs.emplace_back(-coeff);
diff --git a/mlir/lib/Analysis/Presburger/Utils.cpp b/mlir/lib/Analysis/Presburger/Utils.cpp
index e74aae7079680..f12bd5239245d 100644
--- a/mlir/lib/Analysis/Presburger/Utils.cpp
+++ b/mlir/lib/Analysis/Presburger/Utils.cpp
@@ -319,7 +319,7 @@ presburger::getDivUpperBound(ArrayRef<DynamicAPInt> dividend,
assert(divisor > 0 && "divisor must be positive!");
assert(dividend[localVarIdx] == 0 &&
"Local to be set to division must have zero coeff!");
- SmallVector<DynamicAPInt, 8> ineq(dividend.begin(), dividend.end());
+ SmallVector<DynamicAPInt, 8> ineq(dividend);
ineq[localVarIdx] = -divisor;
return ineq;
}
diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
index 0150ff667e4ef..e059d31ca5842 100644
--- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
+++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -63,7 +63,7 @@ static void getXferIndices(RewriterBase &rewriter, TransferOpType xferOp,
for (auto expr : xferOp.getPermutationMap().getResults()) {
if (auto dim = dyn_cast<AffineDimExpr>(expr)) {
Value prevIdx = indices[dim.getPosition()];
- SmallVector<OpFoldResult, 3> dims(dimValues.begin(), dimValues.end());
+ SmallVector<OpFoldResult, 3> dims(dimValues);
dims.push_back(prevIdx);
AffineExpr d0 = rewriter.getAffineDimExpr(offsetMap.getNumDims());
indices[dim.getPosition()] = affine::makeComposedAffineApply(
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index a40ffd53f9d71..35d5f53aad241 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -4146,11 +4146,9 @@ static ParseResult parseAffineMapWithMinMax(OpAsmParser &parser,
llvm::append_range(flatExprs, map.getValue().getResults());
auto operandsRef = llvm::ArrayRef(mapOperands);
auto dimsRef = operandsRef.take_front(map.getValue().getNumDims());
- SmallVector<OpAsmParser::UnresolvedOperand> dims(dimsRef.begin(),
- dimsRef.end());
+ SmallVector<OpAsmParser::UnresolvedOperand> dims(dimsRef);
auto symsRef = operandsRef.drop_front(map.getValue().getNumDims());
- SmallVector<OpAsmParser::UnresolvedOperand> syms(symsRef.begin(),
- symsRef.end());
+ SmallVector<OpAsmParser::UnresolvedOperand> syms(symsRef);
flatDimOperands.append(map.getValue().getNumResults(), dims);
flatSymOperands.append(map.getValue().getNumResults(), syms);
numMapsPerGroup.push_back(map.getValue().getNumResults());
diff --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
index f09d93f3ba444..d68a29f07f1b6 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
@@ -1384,7 +1384,7 @@ unsigned mlir::affine::permuteLoops(MutableArrayRef<AffineForOp> input,
assert(input.size() == permMap.size() && "invalid permutation map size");
// Check whether the permutation spec is valid. This is a small vector - we'll
// just sort and check if it's iota.
- SmallVector<unsigned, 4> checkPermMap(permMap.begin(), permMap.end());
+ SmallVector<unsigned, 4> checkPermMap(permMap);
llvm::sort(checkPermMap);
if (llvm::any_of(llvm::enumerate(checkPermMap),
[](const auto &en) { return en.value() != en.index(); }))
@@ -1583,7 +1583,7 @@ SmallVector<SmallVector<AffineForOp, 8>, 8>
mlir::affine::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
ArrayRef<AffineForOp> targets) {
SmallVector<SmallVector<AffineForOp, 8>, 8> res;
- SmallVector<AffineForOp, 8> currentTargets(targets.begin(), targets.end());
+ SmallVector<AffineForOp, 8> currentTargets(targets);
for (auto it : llvm::zip(forOps, sizes)) {
auto step = stripmineSink(std::get<0>(it), std::get<1>(it), currentTargets);
res.push_back(step);
diff --git a/mlir/lib/Dialect/Arith/Transforms/IntNarrowing.cpp b/mlir/lib/Dialect/Arith/Transforms/IntNarrowing.cpp
index e2d42e961c576..70fd9bc0a1e68 100644
--- a/mlir/lib/Dialect/Arith/Transforms/IntNarrowing.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/IntNarrowing.cpp
@@ -44,8 +44,7 @@ struct NarrowingPattern : OpRewritePattern<SourceOp> {
NarrowingPattern(MLIRContext *ctx, const ArithIntNarrowingOptions &options,
PatternBenefit benefit = 1)
: OpRewritePattern<SourceOp>(ctx, benefit),
- supportedBitwidths(options.bitwidthsSupported.begin(),
- options.bitwidthsSupported.end()) {
+ supportedBitwidths(options.bitwidthsSupported) {
assert(!supportedBitwidths.empty() && "Invalid options");
assert(!llvm::is_contained(supportedBitwidths, 0) && "Invalid bitwidth");
llvm::sort(supportedBitwidths);
diff --git a/mlir/lib/Dialect/GPU/TransformOps/Utils.cpp b/mlir/lib/Dialect/GPU/TransformOps/Utils.cpp
index e8ecbe16c3f06..17bda27b55811 100644
--- a/mlir/lib/Dialect/GPU/TransformOps/Utils.cpp
+++ b/mlir/lib/Dialect/GPU/TransformOps/Utils.cpp
@@ -148,8 +148,7 @@ static GpuIdBuilderFnType common3DIdBuilderFn(int64_t multiplicity = 1) {
rewriter, loc, d0.floorDiv(multiplicity), {scaledIds[0]})
.get<Value>();
// In the 3-D mapping case, unscale the first dimension by the multiplicity.
- SmallVector<int64_t> forallMappingSizeInOriginalBasis(
- forallMappingSizes.begin(), forallMappingSizes.end());
+ SmallVector<int64_t> forallMappingSizeInOriginalBasis(forallMappingSizes);
forallMappingSizeInOriginalBasis[0] *= multiplicity;
return IdBuilderResult{
/*mappingIdOps=*/scaledIds,
diff --git a/mlir/lib/Dialect/GPU/Transforms/NVVMAttachTarget.cpp b/mlir/lib/Dialect/GPU/Transforms/NVVMAttachTarget.cpp
index ad6b0afa29a1a..dd705cd338312 100644
--- a/mlir/lib/Dialect/GPU/Transforms/NVVMAttachTarget.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/NVVMAttachTarget.cpp
@@ -61,7 +61,7 @@ DictionaryAttr NVVMAttachTarget::getFlags(OpBuilder &builder) const {
void NVVMAttachTarget::runOnOperation() {
OpBuilder builder(&getContext());
ArrayRef<std::string> libs(linkLibs);
- SmallVector<StringRef> filesToLink(libs.begin(), libs.end());
+ SmallVector<StringRef> filesToLink(libs);
auto target = builder.getAttr<NVVMTargetAttr>(
optLevel, triple, chip, features, getFlags(builder),
filesToLink.empty() ? nullptr : builder.getStrArrayAttr(filesToLink));
diff --git a/mlir/lib/Dialect/GPU/Transforms/ROCDLAttachTarget.cpp b/mlir/lib/Dialect/GPU/Transforms/ROCDLAttachTarget.cpp
index 60f6a74748828..1f44ffa52e068 100644
--- a/mlir/lib/Dialect/GPU/Transforms/ROCDLAttachTarget.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/ROCDLAttachTarget.cpp
@@ -69,7 +69,7 @@ DictionaryAttr ROCDLAttachTarget::getFlags(OpBuilder &builder) const {
void ROCDLAttachTarget::runOnOperation() {
OpBuilder builder(&getContext());
ArrayRef<std::string> libs(linkLibs);
- SmallVector<StringRef> filesToLink(libs.begin(), libs.end());
+ SmallVector<StringRef> filesToLink(libs);
auto target = builder.getAttr<ROCDLTargetAttr>(
optLevel, triple, chip, features, abiVersion, getFlags(builder),
filesToLink.empty() ? nullptr : builder.getStrArrayAttr(filesToLink));
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 9baf358a95503..48b3abbeee701 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -1117,10 +1117,8 @@ transform::InterchangeOp::applyToOne(transform::TransformRewriter &rewriter,
<< ") different from the number of loops in the target operation ("
<< numLoops << ")";
}
- FailureOr<GenericOp> res =
- interchangeGenericOp(rewriter, target,
- SmallVector<unsigned>(interchangeVector.begin(),
- interchangeVector.end()));
+ FailureOr<GenericOp> res = interchangeGenericOp(
+ rewriter, target, SmallVector<unsigned>(interchangeVector));
if (failed(res))
return emitDefiniteFailure() << "failed to apply";
results.push_back(res->getOperation());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
index a0faeb524c57d..785330736b39a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
@@ -79,8 +79,7 @@ mlir::linalg::interchangeGenericOp(RewriterBase &rewriter, GenericOp genericOp,
ArrayRef<Attribute> itTypes = genericOp.getIteratorTypes().getValue();
SmallVector<Attribute> itTypesVector;
llvm::append_range(itTypesVector, itTypes);
- SmallVector<int64_t> permutation(interchangeVector.begin(),
- interchangeVector.end());
+ SmallVector<int64_t> permutation(interchangeVector);
applyPermutationToVector(itTypesVector, permutation);
genericOp.setIteratorTypesAttr(rewriter.getArrayAttr(itTypesVector));
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 8b0e04fb61b1b..20a99491b6644 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -48,7 +48,7 @@ static SmallVector<Value> makeCanonicalAffineApplies(OpBuilder &b, Location loc,
auto dims = map.getNumDims();
for (auto e : map.getResults()) {
auto exprMap = AffineMap::get(dims, map.getNumSymbols(), e);
- SmallVector<Value> operands(vals.begin(), vals.end());
+ SmallVector<Value> operands(vals);
affine::canonicalizeMapAndOperands(&exprMap, &operands);
res.push_back(b.create<affine::AffineApplyOp>(loc, exprMap, operands));
}
@@ -133,7 +133,7 @@ static void emitScalarImplementation(OpBuilder &b, Location loc,
SmallVector<Value> indexedValues;
indexedValues.reserve(linalgOp->getNumOperands());
- auto allIvsPlusDims = SmallVector<Value>(allIvs.begin(), allIvs.end());
+ auto allIvsPlusDims = SmallVector<Value>(allIvs);
// TODO: Avoid the loads if the corresponding argument of the
// region has no uses.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index fb6ab2055e7dd..31f37334ce397 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -53,7 +53,7 @@ mlir::linalg::makeTiledLoopRanges(RewriterBase &b, Location loc, AffineMap map,
// Apply `map` to get shape sizes in loop order.
SmallVector<OpFoldResult> shapeSizes =
makeComposedFoldedMultiResultAffineApply(b, loc, map, allShapeSizes);
- SmallVector<OpFoldResult> tileSizes(allTileSizes.begin(), allTileSizes.end());
+ SmallVector<OpFoldResult> tileSizes(allTileSizes);
// Traverse the tile sizes, which are in loop order, erase zeros everywhere.
LoopIndexToRangeIndexMap loopIndexToRangeIndex;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index 2133458efe74c..fbff91a94219c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -460,8 +460,7 @@ struct LinalgOpPartialReductionInterface
Location loc, ValueRange partialReduce,
ArrayRef<int> reductionDims) const {
auto linalgOp = cast<LinalgOp>(op);
- SmallVector<int64_t> reductionDimsInt64(reductionDims.begin(),
- reductionDims.end());
+ SmallVector<int64_t> reductionDimsInt64(reductionDims);
auto reduction = b.create<linalg::ReduceOp>(
loc, partialReduce, linalgOp.getDpsInits(), reductionDimsInt64,
[&linalgOp](OpBuilder &b, Location loc, ValueRange inputs) {
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index f30ef235e9cd3..0e5e563ed5450 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -904,7 +904,7 @@ linalg::packMatmulGreedily(RewriterBase &rewriter, LinalgOp linalgOp,
LinalgTilingOptions &
mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
assert(!tileSizeComputationFunction && "tile sizes already set");
- SmallVector<int64_t, 4> tileSizes(ts.begin(), ts.end());
+ SmallVector<int64_t, 4> tileSizes(ts);
tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
OpBuilder::InsertionGuard guard(b);
b.setInsertionPointToStart(
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index 74f6d97aeea53..fa0598dd96885 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -264,8 +264,7 @@ GenericOp makeTransposeOp(OpBuilder &b, Location loc, Value inputTensor,
// Compute the transpose and the indentity indexing maps.
SmallVector<AffineMap> indexingMaps = {
inversePermutation(AffineMap::getPermutationMap(
- SmallVector<unsigned>(transposeVector.begin(), transposeVector.end()),
- b.getContext())),
+ SmallVector<unsigned>(transposeVector), b.getContext())),
AffineMap::getMultiDimIdentityMap(transposeVector.size(),
b.getContext())};
SmallVector<utils::IteratorType> iteratorTypes(transposeVector.size(),
diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
index d6fe22158d002..f0503555bfe4b 100644
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -122,7 +122,7 @@ handleMultidimensionalVectors(ImplicitLocOpBuilder &builder,
// Maybe expand operands to the higher rank vector shape that we'll use to
// iterate over and extract one dimensional vectors.
- SmallVector<int64_t> expandedShape(inputShape.begin(), inputShape.end());
+ SmallVector<int64_t> expandedShape(inputShape);
SmallVector<Value> expandedOperands(operands);
if (expansionDim > 1) {
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index f9e8a27973178..779ffbfc23f4d 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -154,7 +154,7 @@ static void constifyIndexValues(
/// expected for `getAttributes` in `constifyIndexValues`.
static SmallVector<int64_t> getConstantSizes(MemRefType memRefTy) {
ArrayRef<int64_t> sizes = memRefTy.getShape();
- return SmallVector<int64_t>(sizes.begin(), sizes.end());
+ return SmallVector<int64_t>(sizes);
}
/// Wrapper around `getStridesAndOffset` that returns only the offset and
diff --git a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
index 7d3d868b326c6..733fde78e4259 100644
--- a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
+++ b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
@@ -740,9 +740,9 @@ static std::tuple<SmallVector<int64_t>, SmallVector<int64_t>,
SmallVector<int64_t>>
makeVectorShapes(ArrayRef<int64_t> lhs, ArrayRef<int64_t> rhs,
ArrayRef<int64_t> res) {
- SmallVector<int64_t> vlhs{lhs.begin(), lhs.end()};
- SmallVector<int64_t> vrhs{rhs.begin(), rhs.end()};
- SmallVector<int64_t> vres{res.begin(), res.end()};
+ SmallVector<int64_t> vlhs{lhs};
+ SmallVector<int64_t> vrhs{rhs};
+ SmallVector<int64_t> vres{res};
return std::make_tuple(vlhs, vrhs, vres);
}
@@ -758,7 +758,7 @@ MmaSyncBuilder::getIndexCalculators(ArrayRef<int64_t> opShape,
&MmaSyncBuilder::m16n8k4tf32Rhs,
&MmaSyncBuilder::m16n8k4tf32Res),
makeVectorShapes({2, 1}, {1, 1}, {2, 2}),
- SmallVector<int64_t>{opShape.begin(), opShape.end()},
+ SmallVector<int64_t>{opShape},
/*tf32Enabled=*/true};
}
// This is the version with f16 accumulation.
@@ -769,7 +769,7 @@ MmaSyncBuilder::getIndexCalculators(ArrayRef<int64_t> opShape,
&MmaSyncBuilder::m16n8k16f16Rhs,
&MmaSyncBuilder::m16n8k16f16Res),
makeVectorShapes({4, 2}, {2, 2}, {2, 2}),
- SmallVector<int64_t>{opShape.begin(), opShape.end()},
+ SmallVector<int64_t>{opShape},
/*tf32Enabled=*/false};
}
return failure();
diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
index 9df6e24de178f..40f82557d2eb8 100644
--- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -1184,7 +1184,7 @@ SmallVector<Loops, 8> mlir::tile(ArrayRef<scf::ForOp> forOps,
ArrayRef<Value> sizes,
ArrayRef<scf::ForOp> targets) {
SmallVector<SmallVector<scf::ForOp, 8>, 8> res;
- SmallVector<scf::ForOp, 8> currentTargets(targets.begin(), targets.end());
+ SmallVector<scf::ForOp, 8> currentTargets(targets);
for (auto it : llvm::zip(forOps, sizes)) {
auto step = stripmineSink(std::get<0>(it), std::get<1>(it), currentTargets);
res.push_back(step);
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp
index 3f25696aa5eb6..3808620bdffa6 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp
@@ -1039,7 +1039,7 @@ StructType::get(ArrayRef<Type> memberTypes,
assert(!memberTypes.empty() && "Struct needs at least one member type");
// Sort the decorations.
SmallVector<StructType::MemberDecorationInfo, 4> sortedDecorations(
- memberDecorations.begin(), memberDecorations.end());
+ memberDecorations);
llvm::array_pod_sort(sortedDecorations.begin(), sortedDecorations.end());
return Base::get(memberTypes.vec().front().getContext(),
/*identifier=*/StringRef(), memberTypes, offsetInfo,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
index 248e9413b6e0d..0c5912bb73772 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
@@ -132,8 +132,7 @@ static void forEachIJPairInAllBuffers(
function_ref<void(uint64_t, Value, Value, Value)> bodyBuilder) {
// Create code for the first (xPerm + ny) buffers.
- SmallVector<AffineExpr> exps(xPerm.getResults().begin(),
- xPerm.getResults().end());
+ SmallVector<AffineExpr> exps(xPerm.getResults());
for (unsigned y = 0; y < ny; y++) {
exps.push_back(builder.getAffineDimExpr(y + xPerm.getNumResults()));
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
index 7a2400b41b045..d1c95dabd88a5 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
@@ -112,7 +112,7 @@ static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl,
VectorType vtp = vectorType(vl, mem);
Value pass = constantZero(rewriter, loc, vtp);
if (llvm::isa<VectorType>(idxs.back().getType())) {
- SmallVector<Value> scalarArgs(idxs.begin(), idxs.end());
+ SmallVector<Value> scalarArgs(idxs);
Value indexVec = idxs.back();
scalarArgs.back() = constantIndex(rewriter, loc, 0);
return rewriter.create<vector::GatherOp>(loc, vtp, mem, scalarArgs,
@@ -129,7 +129,7 @@ static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl,
static void genVectorStore(PatternRewriter &rewriter, Location loc, Value mem,
ArrayRef<Value> idxs, Value vmask, Value rhs) {
if (llvm::isa<VectorType>(idxs.back().getType())) {
- SmallVector<Value> scalarArgs(idxs.begin(), idxs.end());
+ SmallVector<Value> scalarArgs(idxs);
Value indexVec = idxs.back();
scalarArgs.back() = constantIndex(rewriter, loc, 0);
rewriter.create<vector::ScatterOp>(loc, mem, scalarArgs, indexVec, vmask,
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 0751ffc419cbf..e11c6aaccf74d 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -178,8 +178,7 @@ static llvm::SmallBitVector getDroppedDims(ArrayRef<int64_t> reducedShape,
static RankedTensorType
foldDynamicToStaticDimSizes(RankedTensorType type, ValueRange dynamicSizes,
SmallVector<Value> &foldedDynamicSizes) {
- SmallVector<int64_t> staticShape(type.getShape().begin(),
- type.getShape().end());
+ SmallVector<int64_t> staticShape(type.getShape());
assert(type.getNumDynamicDims() ==
static_cast<int64_t>(dynamicSizes.size()) &&
"incorrect number of dynamic sizes");
@@ -2810,8 +2809,7 @@ struct InsertSliceOpSourceCastInserter final
RankedTensorType srcType = insertSliceOp.getSourceType();
if (srcType.getRank() != insertSliceOp.getDestType().getRank())
return failure();
- SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
- srcType.getShape().end());
+ SmallVector<int64_t> newSrcShape(srcType.getShape());
for (int64_t i = 0; i < srcType.getRank(); ++i) {
if (std::optional<int64_t> constInt =
getConstantIntValue(insertSliceOp.getMixedSizes()[i])) {
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
index 9b2a97eb2b006..361340a4e62f2 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -126,8 +126,8 @@ struct PackOpTiling
// The tiling is applied on interchanged dimensions. We have to undo the
// interchange to map sizes and offsets to the original input.
int64_t inputRank = packOp.getSourceRank();
- SmallVector<OpFoldResult> origOffsets(offsets.begin(), offsets.end());
- SmallVector<OpFoldResult> origSizes(sizes.begin(), sizes.end());
+ SmallVector<OpFoldResult> origOffsets(offsets);
+ SmallVector<OpFoldResult> origSizes(sizes);
applyPermToRange(origOffsets, origSizes,
invertPermutationVector(packOp.getOuterDimsPerm()));
@@ -486,9 +486,8 @@ struct UnPackOpTiling
// The tiling is applied on interchanged dimensions. We have to undo the
// interchange to map sizes and offsets to the original input.
int64_t outputRank = unPackOp.getDestRank();
- SmallVector<OpFoldResult> origOffsets(destOffsets.begin(),
- destOffsets.end());
- SmallVector<OpFoldResult> origSizes(destSizes.begin(), destSizes.end());
+ SmallVector<OpFoldResult> origOffsets(destOffsets);
+ SmallVector<OpFoldResult> origSizes(destSizes);
applyPermToRange(origOffsets, origSizes,
invertPermutationVector(unPackOp.getOuterDimsPerm()));
diff --git a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
index 8909c5879774a..a0d8a08fc6ba4 100644
--- a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
@@ -63,8 +63,7 @@ mlir::tensor::computeTransposedType(RankedTensorType rankedTensorType,
transposeVector.size() != static_cast<size_t>(rankedTensorType.getRank()))
return failure();
- SmallVector<int64_t> transposedShape(rankedTensorType.getShape().begin(),
- rankedTensorType.getShape().end());
+ SmallVector<int64_t> transposedShape(rankedTensorType.getShape());
applyPermutationToVector(transposedShape, transposeVector);
using RTTBuilder = RankedTensorType::Builder;
diff --git a/mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp b/mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp
index e2b314f9db102..f8f85e4615c50 100644
--- a/mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp
+++ b/mlir/lib/Dialect/Transform/Interfaces/TransformInterfaces.cpp
@@ -237,7 +237,7 @@ transform::TransformState::setPayloadOps(Value value,
// Setting new payload for the value without cleaning it first is a misuse of
// the API, assert here.
- SmallVector<Operation *> storedTargets(targets.begin(), targets.end());
+ SmallVector<Operation *> storedTargets(targets);
Mappings &mappings = getMapping(value);
bool inserted =
mappings.direct.insert({value, std::move(storedTargets)}).second;
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index 5047bd925d4c5..c2ce4c268d84b 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -3684,7 +3684,7 @@ class StridedSliceNonSplatConstantFolder final
SmallVector<int64_t, 4> offsets(sliceRank, 0);
copy(getI64SubArray(extractStridedSliceOp.getOffsets()), offsets.begin());
- SmallVector<int64_t, 4> sizes(sourceShape.begin(), sourceShape.end());
+ SmallVector<int64_t, 4> sizes(sourceShape);
copy(getI64SubArray(extractStridedSliceOp.getSizes()), sizes.begin());
// Calculate the slice elements by enumerating all slice positions and
@@ -5576,8 +5576,7 @@ OpFoldResult BitCastOp::fold(FoldAdaptor adaptor) {
static SmallVector<int64_t, 8> extractShape(MemRefType memRefType) {
auto vectorType = llvm::dyn_cast<VectorType>(memRefType.getElementType());
- SmallVector<int64_t, 8> res(memRefType.getShape().begin(),
- memRefType.getShape().end());
+ SmallVector<int64_t, 8> res(memRefType.getShape());
if (vectorType)
res.append(vectorType.getShape().begin(), vectorType.getShape().end());
return res;
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp
index 92fddc13d6333..a1f67bd0e9ed3 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp
@@ -130,12 +130,12 @@ struct ScanToArithOps : public OpRewritePattern<vector::ScanOp> {
VectorType initialValueType = scanOp.getInitialValueType();
int64_t initialValueRank = initialValueType.getRank();
- SmallVector<int64_t> reductionShape(destShape.begin(), destShape.end());
+ SmallVector<int64_t> reductionShape(destShape);
reductionShape[reductionDim] = 1;
VectorType reductionType = VectorType::get(reductionShape, elType);
SmallVector<int64_t> offsets(destRank, 0);
SmallVector<int64_t> strides(destRank, 1);
- SmallVector<int64_t> sizes(destShape.begin(), destShape.end());
+ SmallVector<int64_t> sizes(destShape);
sizes[reductionDim] = 1;
ArrayAttr scanSizes = rewriter.getI64ArrayAttr(sizes);
ArrayAttr scanStrides = rewriter.getI64ArrayAttr(strides);
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index b3c6dec47f6be..344cfc0cbffb9 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -462,8 +462,7 @@ struct TransferReadToVectorLoadLowering
// If there is broadcasting involved then we first load the unbroadcasted
// vector, and then broadcast it with `vector.broadcast`.
ArrayRef<int64_t> vectorShape = read.getVectorType().getShape();
- SmallVector<int64_t> unbroadcastedVectorShape(vectorShape.begin(),
- vectorShape.end());
+ SmallVector<int64_t> unbroadcastedVectorShape(vectorShape);
for (unsigned i : broadcastedDims)
unbroadcastedVectorShape[i] = 1;
VectorType unbroadcastedVectorType = read.getVectorType().cloneWith(
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
index a67e03e85f714..7285ad65fb549 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
@@ -443,8 +443,7 @@ static vector::TransferWriteOp cloneWriteOp(RewriterBase &rewriter,
/// d1) and return vector<16x2x64>
static VectorType getDistributedType(VectorType originalType, AffineMap map,
int64_t warpSize) {
- SmallVector<int64_t> targetShape(originalType.getShape().begin(),
- originalType.getShape().end());
+ SmallVector<int64_t> targetShape(originalType.getShape());
for (unsigned i = 0, e = map.getNumResults(); i < e; i++) {
unsigned position = map.getDimPosition(i);
if (targetShape[position] % warpSize != 0) {
@@ -1293,8 +1292,7 @@ struct WarpOpExtract : public OpRewritePattern<WarpExecuteOnLane0Op> {
(void)distributedDim;
// Yield source vector from warp op.
- SmallVector<int64_t> newDistributedShape(extractSrcType.getShape().begin(),
- extractSrcType.getShape().end());
+ SmallVector<int64_t> newDistributedShape(extractSrcType.getShape());
for (int i = 0; i < distributedType.getRank(); ++i)
newDistributedShape[i + extractOp.getNumIndices()] =
distributedType.getDimSize(i);
@@ -1562,8 +1560,7 @@ struct WarpOpInsert : public OpRewritePattern<WarpExecuteOnLane0Op> {
// Compute the distributed source vector type.
VectorType srcVecType = cast<VectorType>(insertOp.getSourceType());
- SmallVector<int64_t> distrSrcShape(srcVecType.getShape().begin(),
- srcVecType.getShape().end());
+ SmallVector<int64_t> distrSrcShape(srcVecType.getShape());
// E.g.: vector.insert %s, %d [2] : vector<96xf32> into vector<128x96xf32>
// Case 1: distrDestDim = 1 (dim of size 96). In that case, each lane will
// insert a smaller vector<3xf32>.
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
index b3f558c3bac12..800c1d9fb1dbf 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
@@ -41,7 +41,7 @@ static SmallVector<Value> sliceTransferIndices(ArrayRef<int64_t> elementOffsets,
return false;
};
// Compute 'sliceIndices' by adding 'sliceOffsets[i]' to 'indices[i]'.
- SmallVector<Value> slicedIndices(indices.begin(), indices.end());
+ SmallVector<Value> slicedIndices(indices);
for (const auto &dim : llvm::enumerate(permutationMap.getResults())) {
if (isBroadcast(dim.value()))
continue;
diff --git a/mlir/lib/IR/AffineExpr.cpp b/mlir/lib/IR/AffineExpr.cpp
index 7978b35e7147d..fc7ede279643e 100644
--- a/mlir/lib/IR/AffineExpr.cpp
+++ b/mlir/lib/IR/AffineExpr.cpp
@@ -1019,8 +1019,7 @@ AffineExpr AffineExpr::operator%(AffineExpr other) const {
}
AffineExpr AffineExpr::compose(AffineMap map) const {
- SmallVector<AffineExpr, 8> dimReplacements(map.getResults().begin(),
- map.getResults().end());
+ SmallVector<AffineExpr, 8> dimReplacements(map.getResults());
return replaceDimsAndSymbols(dimReplacements, {});
}
raw_ostream &mlir::operator<<(raw_ostream &os, AffineExpr expr) {
diff --git a/mlir/lib/IR/AffineMap.cpp b/mlir/lib/IR/AffineMap.cpp
index 859fb8ebc10e8..5cbd0b090492b 100644
--- a/mlir/lib/IR/AffineMap.cpp
+++ b/mlir/lib/IR/AffineMap.cpp
@@ -759,7 +759,7 @@ AffineMap mlir::simplifyAffineMap(AffineMap map) {
AffineMap mlir::removeDuplicateExprs(AffineMap map) {
auto results = map.getResults();
- SmallVector<AffineExpr, 4> uniqueExprs(results.begin(), results.end());
+ SmallVector<AffineExpr, 4> uniqueExprs(results);
uniqueExprs.erase(llvm::unique(uniqueExprs), uniqueExprs.end());
return AffineMap::get(map.getNumDims(), map.getNumSymbols(), uniqueExprs,
map.getContext());
@@ -939,9 +939,8 @@ mlir::expandDimsToRank(AffineMap map, int64_t rank,
//===----------------------------------------------------------------------===//
MutableAffineMap::MutableAffineMap(AffineMap map)
- : results(map.getResults().begin(), map.getResults().end()),
- numDims(map.getNumDims()), numSymbols(map.getNumSymbols()),
- context(map.getContext()) {}
+ : results(map.getResults()), numDims(map.getNumDims()),
+ numSymbols(map.getNumSymbols()), context(map.getContext()) {}
void MutableAffineMap::reset(AffineMap map) {
results.clear();
diff --git a/mlir/lib/IR/OperationSupport.cpp b/mlir/lib/IR/OperationSupport.cpp
index a72ccb9ca490c..bda96509e4c0a 100644
--- a/mlir/lib/IR/OperationSupport.cpp
+++ b/mlir/lib/IR/OperationSupport.cpp
@@ -431,7 +431,7 @@ MutableOperandRange::MutableOperandRange(
Operation *owner, unsigned start, unsigned length,
ArrayRef<OperandSegment> operandSegments)
: owner(owner), start(start), length(length),
- operandSegments(operandSegments.begin(), operandSegments.end()) {
+ operandSegments(operandSegments) {
assert((start + length) <= owner->getNumOperands() && "invalid range");
}
MutableOperandRange::MutableOperandRange(Operation *owner)
diff --git a/mlir/lib/Interfaces/FunctionInterfaces.cpp b/mlir/lib/Interfaces/FunctionInterfaces.cpp
index 8b6f7110c2cf0..80f47a3f83676 100644
--- a/mlir/lib/Interfaces/FunctionInterfaces.cpp
+++ b/mlir/lib/Interfaces/FunctionInterfaces.cpp
@@ -146,7 +146,7 @@ static void setArgResAttrDict(FunctionOpInterface op, unsigned numTotalIndices,
return removeArgResAttrs<isArg>(op);
// Otherwise, create a new attribute array with the updated dictionary.
- SmallVector<Attribute, 8> newAttrs(rawAttrArray.begin(), rawAttrArray.end());
+ SmallVector<Attribute, 8> newAttrs(rawAttrArray);
newAttrs[index] = attrs;
setArgResAttrs<isArg>(op, ArrayAttr::get(op->getContext(), newAttrs));
}
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index 9e45cf352940b..af2f2cf616fdd 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -689,7 +689,7 @@ static Type getVectorTypeForAttr(Type type, ArrayRef<int64_t> arrayShape = {}) {
if (!isScalarType(elementType))
return {};
- SmallVector<int64_t> shape(arrayShape.begin(), arrayShape.end());
+ SmallVector<int64_t> shape(arrayShape);
shape.push_back(numElements.getKnownMinValue());
return VectorType::get(shape, elementType);
}
diff --git a/mlir/lib/Transforms/Mem2Reg.cpp b/mlir/lib/Transforms/Mem2Reg.cpp
index a452cc3fae8ac..1f6998709ae02 100644
--- a/mlir/lib/Transforms/Mem2Reg.cpp
+++ b/mlir/lib/Transforms/Mem2Reg.cpp
@@ -639,8 +639,7 @@ LogicalResult mlir::tryToPromoteMemorySlots(
// lazily and cached to avoid expensive recomputation.
BlockIndexCache blockIndexCache;
- SmallVector<PromotableAllocationOpInterface> workList(allocators.begin(),
- allocators.end());
+ SmallVector<PromotableAllocationOpInterface> workList(allocators);
SmallVector<PromotableAllocationOpInterface> newWorkList;
newWorkList.reserve(workList.size());
diff --git a/mlir/lib/Transforms/SROA.cpp b/mlir/lib/Transforms/SROA.cpp
index 1c0efa06518dd..dc902cc63e0b5 100644
--- a/mlir/lib/Transforms/SROA.cpp
+++ b/mlir/lib/Transforms/SROA.cpp
@@ -200,8 +200,7 @@ LogicalResult mlir::tryToDestructureMemorySlots(
SROAStatistics statistics) {
bool destructuredAny = false;
- SmallVector<DestructurableAllocationOpInterface> workList(allocators.begin(),
- allocators.end());
+ SmallVector<DestructurableAllocationOpInterface> workList(allocators);
SmallVector<DestructurableAllocationOpInterface> newWorkList;
newWorkList.reserve(allocators.size());
// Destructuring a slot can allow for further destructuring of other
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index c978699e179fc..592e24af94d67 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -96,15 +96,14 @@ struct TestVectorToVectorLowering
return std::nullopt;
dstVec = vecType;
}
- return SmallVector<int64_t>(dstVec.getShape().begin(),
- dstVec.getShape().end());
+ return SmallVector<int64_t>(dstVec.getShape());
}
if (auto writeOp = dyn_cast<vector::TransferWriteOp>(op)) {
auto insert = writeOp.getVector().getDefiningOp<InsertStridedSliceOp>();
if (!insert)
return std::nullopt;
ArrayRef<int64_t> shape = insert.getSourceVectorType().getShape();
- return SmallVector<int64_t>(shape.begin(), shape.end());
+ return SmallVector<int64_t>(shape);
}
return std::nullopt;
}
More information about the Mlir-commits
mailing list