[Mlir-commits] [mlir] e4853be - Apply clang-tidy fixes for performance-for-range-copy to MLIR (NFC)
Mehdi Amini
llvmlistbot at llvm.org
Sun Jan 2 14:20:37 PST 2022
Author: Mehdi Amini
Date: 2022-01-02T22:19:56Z
New Revision: e4853be2f130c3e27f6c84fc4ad1d66d8b9a3810
URL: https://github.com/llvm/llvm-project/commit/e4853be2f130c3e27f6c84fc4ad1d66d8b9a3810
DIFF: https://github.com/llvm/llvm-project/commit/e4853be2f130c3e27f6c84fc4ad1d66d8b9a3810.diff
LOG: Apply clang-tidy fixes for performance-for-range-copy to MLIR (NFC)
Added:
Modified:
mlir/lib/Analysis/AffineStructures.cpp
mlir/lib/Analysis/LoopAnalysis.cpp
mlir/lib/Analysis/NumberOfExecutions.cpp
mlir/lib/Analysis/SliceAnalysis.cpp
mlir/lib/Bindings/Python/IRCore.cpp
mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
mlir/lib/Conversion/LLVMCommon/Pattern.cpp
mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp
mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp
mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp
mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
mlir/lib/Dialect/Affine/IR/AffineOps.cpp
mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
mlir/lib/Dialect/Linalg/Utils/Utils.cpp
mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
mlir/lib/Dialect/PDL/IR/PDL.cpp
mlir/lib/Dialect/SCF/SCF.cpp
mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp
mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
mlir/lib/Dialect/Shape/IR/Shape.cpp
mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp
mlir/lib/Dialect/Vector/VectorOps.cpp
mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp
mlir/lib/Dialect/Vector/VectorTransforms.cpp
mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp
mlir/lib/IR/AffineExpr.cpp
mlir/lib/IR/AffineMap.cpp
mlir/lib/IR/BuiltinTypes.cpp
mlir/lib/IR/Verifier.cpp
mlir/lib/Interfaces/ControlFlowInterfaces.cpp
mlir/lib/Interfaces/InferTypeOpInterface.cpp
mlir/lib/Reducer/ReductionTreePass.cpp
mlir/lib/Rewrite/ByteCode.cpp
mlir/lib/Rewrite/PatternApplicator.cpp
mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
mlir/lib/Transforms/BufferResultsToOutParams.cpp
mlir/lib/Transforms/PipelineDataTransfer.cpp
mlir/lib/Transforms/Utils/DialectConversion.cpp
mlir/lib/Transforms/Utils/InliningUtils.cpp
mlir/lib/Transforms/Utils/LoopUtils.cpp
mlir/lib/Transforms/Utils/RegionUtils.cpp
Removed:
################################################################################
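For context, clang-tidy's performance-for-range-copy check warns when the loop variable of a range-based for is copied on every iteration even though the loop body only reads it; the suggested fix is to bind the variable by const reference instead, which is exactly the pattern applied to the llvm::enumerate loops throughout the diff below. The following minimal sketch (standard C++, with illustrative names unrelated to the MLIR sources) shows the shape of the before/after change:

#include <string>
#include <vector>

// Illustrative helper: sums the lengths of a list of strings.
std::size_t totalLength(const std::vector<std::string> &names) {
  std::size_t total = 0;
  // Before: `auto name` copies each std::string on every iteration even
  // though the body only reads it; performance-for-range-copy flags this.
  //   for (auto name : names)
  //     total += name.size();
  // After: `const auto &name` binds to the existing element with no copy,
  // mirroring the `for (const auto &en : llvm::enumerate(...))` edits below.
  for (const auto &name : names)
    total += name.size();
  return total;
}
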
diff --git a/mlir/lib/Analysis/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp
index 205abe280d9bd..3742a24281489 100644
--- a/mlir/lib/Analysis/AffineStructures.cpp
+++ b/mlir/lib/Analysis/AffineStructures.cpp
@@ -3346,7 +3346,7 @@ AffineMap mlir::alignAffineMapWithValues(AffineMap map, ValueRange operands,
newSyms->append(syms.begin(), syms.end());
}
- for (auto operand : llvm::enumerate(operands)) {
+ for (const auto &operand : llvm::enumerate(operands)) {
// Compute replacement dim/sym of operand.
AffineExpr replacement;
auto dimIt = std::find(dims.begin(), dims.end(), operand.value());
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 0672f25671ec9..914bc1604d391 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -353,7 +353,8 @@ bool mlir::isOpwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
// Work backwards over the body of the block so that the shift of a use's
// ancestor operation in the block gets recorded before it's looked up.
DenseMap<Operation *, uint64_t> forBodyShift;
- for (auto it : llvm::enumerate(llvm::reverse(forBody->getOperations()))) {
+ for (const auto &it :
+ llvm::enumerate(llvm::reverse(forBody->getOperations()))) {
auto &op = it.value();
// Get the index of the current operation, note that we are iterating in
diff --git a/mlir/lib/Analysis/NumberOfExecutions.cpp b/mlir/lib/Analysis/NumberOfExecutions.cpp
index ad30058d3d737..ad90cee92ee8b 100644
--- a/mlir/lib/Analysis/NumberOfExecutions.cpp
+++ b/mlir/lib/Analysis/NumberOfExecutions.cpp
@@ -52,7 +52,7 @@ static void computeRegionBlockNumberOfExecutions(
// Query RegionBranchOpInterface interface if it is available.
if (auto regionInterface = dyn_cast<RegionBranchOpInterface>(parentOp)) {
SmallVector<Attribute, 4> operands(parentOp->getNumOperands());
- for (auto operandIt : llvm::enumerate(parentOp->getOperands()))
+ for (const auto &operandIt : llvm::enumerate(parentOp->getOperands()))
matchPattern(operandIt.value(), m_Constant(&operands[operandIt.index()]));
regionInterface.getNumRegionInvocations(operands, numRegionsInvocations);
diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
index b45ee4c0faae4..fa78a804175dd 100644
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -86,7 +86,7 @@ static void getBackwardSliceImpl(Operation *op,
if (filter && !filter(op))
return;
- for (auto en : llvm::enumerate(op->getOperands())) {
+ for (const auto &en : llvm::enumerate(op->getOperands())) {
auto operand = en.value();
if (auto *definingOp = operand.getDefiningOp()) {
if (backwardSlice->count(definingOp) == 0)
diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp
index ccdd159fd4384..be2abcdd501f5 100644
--- a/mlir/lib/Bindings/Python/IRCore.cpp
+++ b/mlir/lib/Bindings/Python/IRCore.cpp
@@ -1155,7 +1155,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList,
resultTypes.reserve(resultTypeList.size());
if (resultSegmentSpecObj.is_none()) {
// Non-variadic result unpacking.
- for (auto it : llvm::enumerate(resultTypeList)) {
+ for (const auto &it : llvm::enumerate(resultTypeList)) {
try {
resultTypes.push_back(py::cast<PyType *>(it.value()));
if (!resultTypes.back())
@@ -1179,7 +1179,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList,
.str());
}
resultSegmentLengths.reserve(resultTypeList.size());
- for (auto it :
+ for (const auto &it :
llvm::enumerate(llvm::zip(resultTypeList, resultSegmentSpec))) {
int segmentSpec = std::get<1>(it.value());
if (segmentSpec == 1 || segmentSpec == 0) {
@@ -1240,7 +1240,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList,
operands.reserve(operands.size());
if (operandSegmentSpecObj.is_none()) {
// Non-sized operand unpacking.
- for (auto it : llvm::enumerate(operandList)) {
+ for (const auto &it : llvm::enumerate(operandList)) {
try {
operands.push_back(py::cast<PyValue *>(it.value()));
if (!operands.back())
@@ -1264,7 +1264,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList,
.str());
}
operandSegmentLengths.reserve(operandList.size());
- for (auto it :
+ for (const auto &it :
llvm::enumerate(llvm::zip(operandList, operandSegmentSpec))) {
int segmentSpec = std::get<1>(it.value());
if (segmentSpec == 1 || segmentSpec == 0) {
diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
index 8c269ee8a4dfb..828f0ef151203 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
@@ -21,7 +21,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
SmallVector<LLVM::GlobalOp, 3> workgroupBuffers;
workgroupBuffers.reserve(gpuFuncOp.getNumWorkgroupAttributions());
- for (auto en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) {
+ for (const auto &en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) {
Value attribution = en.value();
auto type = attribution.getType().dyn_cast<MemRefType>();
@@ -88,7 +88,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
if (!workgroupBuffers.empty())
zero = rewriter.create<LLVM::ConstantOp>(loc, i32Type,
rewriter.getI32IntegerAttr(0));
- for (auto en : llvm::enumerate(workgroupBuffers)) {
+ for (const auto &en : llvm::enumerate(workgroupBuffers)) {
LLVM::GlobalOp global = en.value();
Value address = rewriter.create<LLVM::AddressOfOp>(loc, global);
auto elementType =
@@ -111,7 +111,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
// Rewrite private memory attributions to alloca'ed buffers.
unsigned numWorkgroupAttributions = gpuFuncOp.getNumWorkgroupAttributions();
auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
- for (auto en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) {
+ for (const auto &en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) {
Value attribution = en.value();
auto type = attribution.getType().cast<MemRefType>();
assert(type && type.hasStaticShape() && "unexpected type in attribution");
diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
index 757f3828bdc7d..f7f8b6b142357 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
@@ -634,7 +634,7 @@ Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateParamsArray(
arraySize, /*alignment=*/0);
auto zero = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
builder.getI32IntegerAttr(0));
- for (auto en : llvm::enumerate(arguments)) {
+ for (const auto &en : llvm::enumerate(arguments)) {
auto index = builder.create<LLVM::ConstantOp>(
loc, llvmInt32Type, builder.getI32IntegerAttr(en.index()));
auto fieldPtr = builder.create<LLVM::GEPOp>(
diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
index 7405f6f91a4fa..96dd32aaa99da 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
@@ -206,7 +206,7 @@ lowerAsEntryFunction(gpu::GPUFuncOp funcOp, TypeConverter &typeConverter,
// LowerABIAttributesPass.
TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs());
{
- for (auto argType : enumerate(funcOp.getType().getInputs())) {
+ for (const auto &argType : enumerate(funcOp.getType().getInputs())) {
auto convertedType = typeConverter.convertType(argType.value());
signatureConverter.addInputs(argType.index(), convertedType);
}
diff --git a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
index b8d1a8556395f..e7e64aece05d2 100644
--- a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
+++ b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
@@ -222,7 +222,7 @@ void VulkanLaunchFuncToVulkanCallsPass::createBindMemRefCalls(
Value descriptorSet = builder.create<LLVM::ConstantOp>(
loc, getInt32Type(), builder.getI32IntegerAttr(0));
- for (auto en :
+ for (const auto &en :
llvm::enumerate(cInterfaceVulkanLaunchCallOp.getOperands().drop_front(
kVulkanLaunchNumConfigOperands))) {
// Create LLVM constant for the descriptor binding index.
diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
index 47dabc90bce5b..0003bd859e479 100644
--- a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
@@ -213,11 +213,11 @@ MemRefDescriptor ConvertToLLVMPattern::createMemRefDescriptor(
createIndexConstant(rewriter, loc, 0));
// Fields 4: Sizes.
- for (auto en : llvm::enumerate(sizes))
+ for (const auto &en : llvm::enumerate(sizes))
memRefDescriptor.setSize(rewriter, loc, en.index(), en.value());
// Field 5: Strides.
- for (auto en : llvm::enumerate(strides))
+ for (const auto &en : llvm::enumerate(strides))
memRefDescriptor.setStride(rewriter, loc, en.index(), en.value());
return memRefDescriptor;
diff --git a/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp b/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp
index ace5bec09f4e7..54c5b93877ff7 100644
--- a/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp
@@ -101,7 +101,7 @@ LogicalResult LLVM::detail::handleMultidimensionalVectors(
// For this unrolled `position` corresponding to the `linearIndex`^th
// element, extract operand vectors
SmallVector<Value, 4> extractedOperands;
- for (auto operand : llvm::enumerate(operands)) {
+ for (const auto &operand : llvm::enumerate(operands)) {
extractedOperands.push_back(rewriter.create<LLVM::ExtractValueOp>(
loc, operand1DVectorTypes[operand.index()], operand.value(),
position));
diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index b1f7d0452ee13..9142be183174e 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -1420,7 +1420,8 @@ class TransposeOpLowering : public ConvertOpToLLVMPattern<memref::TransposeOp> {
targetMemRef.setOffset(rewriter, loc, viewMemRef.offset(rewriter, loc));
// Iterate over the dimensions and apply size/stride permutation.
- for (auto en : llvm::enumerate(transposeOp.permutation().getResults())) {
+ for (const auto &en :
+ llvm::enumerate(transposeOp.permutation().getResults())) {
int sourcePos = en.index();
int targetPos = en.value().cast<AffineDimExpr>().getPosition();
targetMemRef.setSize(rewriter, loc, targetPos,
diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
index 7db7dc03dc80d..367bbb55ee1b2 100644
--- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
+++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
@@ -736,7 +736,7 @@ void PatternLowering::generateRewriter(
bool seenVariableLength = false;
Type valueTy = builder.getType<pdl::ValueType>();
Type valueRangeTy = pdl::RangeType::get(valueTy);
- for (auto it : llvm::enumerate(resultTys)) {
+ for (const auto &it : llvm::enumerate(resultTys)) {
Value &type = rewriteValues[it.value()];
if (type)
continue;
@@ -862,7 +862,7 @@ void PatternLowering::generateOperationResultTypeRewriter(
// Otherwise, handle inference for each of the result types individually.
OperandRange resultTypeValues = op.types();
types.reserve(resultTypeValues.size());
- for (auto it : llvm::enumerate(resultTypeValues)) {
+ for (const auto &it : llvm::enumerate(resultTypeValues)) {
Value resultType = it.value();
// Check for an already translated value.
diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp
index 517f28c2044fa..c325bfb424569 100644
--- a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp
+++ b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp
@@ -162,7 +162,7 @@ static void getTreePredicates(std::vector<PositionalPredicate> &predList,
builder.getAllOperands(opPos));
} else {
bool foundVariableLength = false;
- for (auto operandIt : llvm::enumerate(operands)) {
+ for (const auto &operandIt : llvm::enumerate(operands)) {
bool isVariadic = operandIt.value().getType().isa<pdl::RangeType>();
foundVariableLength |= isVariadic;
@@ -460,7 +460,7 @@ static void buildCostGraph(ArrayRef<Value> roots, RootOrderingGraph &graph,
}
// Default case: visit all the operands.
- for (auto p : llvm::enumerate(operationOp.operands()))
+ for (const auto &p : llvm::enumerate(operationOp.operands()))
toVisit.emplace(p.value(), entry.value, p.index(),
entry.depth + 1);
})
diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
index f3547e580501e..d2faff9d32389 100644
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
@@ -261,7 +261,7 @@ void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
builder.setInsertionPointToStart(&launchOp.body().front());
auto *lbArgumentIt = lbs.begin();
auto *stepArgumentIt = steps.begin();
- for (auto en : llvm::enumerate(ivs)) {
+ for (const auto &en : llvm::enumerate(ivs)) {
Value id =
en.index() < numBlockDims
? getDim3Value(launchOp.getBlockIds(), en.index())
diff --git a/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp b/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp
index fd6ec82086206..6bb3da666ce77 100644
--- a/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp
+++ b/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp
@@ -387,7 +387,7 @@ WhileOpConversion::matchAndRewrite(scf::WhileOp whileOp, OpAdaptor adaptor,
// the before region, which may not matching the whole op's result. Instead,
// the scf.condition op returns values matching the whole op's results. So we
// need to create/load/store variables according to that.
- for (auto it : llvm::enumerate(condArgs)) {
+ for (const auto &it : llvm::enumerate(condArgs)) {
auto res = it.value();
auto i = it.index();
auto pointerType =
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
index bf60f4b6a211d..9f1f93f9abf7c 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
@@ -208,7 +208,7 @@ class GPULaunchLowering : public ConvertOpToLLVMPattern<gpu::LaunchFuncOp> {
SmallVector<CopyInfo, 4> copyInfo;
auto numKernelOperands = launchOp.getNumKernelOperands();
auto kernelOperands = adaptor.getOperands().take_back(numKernelOperands);
- for (auto operand : llvm::enumerate(kernelOperands)) {
+ for (const auto &operand : llvm::enumerate(kernelOperands)) {
// Check if the kernel's operand is a ranked memref.
auto memRefType = launchOp.getKernelOperand(operand.index())
.getType()
diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
index feaa140cc710e..88c7f43b8dc54 100644
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -254,7 +254,7 @@ struct FuncOpConversionBase : public ConvertOpToLLVMPattern<FuncOp> {
rewriter.getNamedAttr(function_like_impl::getArgDictAttrName(),
rewriter.getArrayAttr(newArgAttrs)));
}
- for (auto pair : llvm::enumerate(attributes)) {
+ for (const auto &pair : llvm::enumerate(attributes)) {
if (pair.value().getName() == "llvm.linkage") {
attributes.erase(attributes.begin() + pair.index());
break;
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index 38c8276f28430..f28527d185c1f 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -694,7 +694,7 @@ elementwiseMatchAndRewriteHelper(Operation *operation,
SmallVector<int64_t, 5> newShape;
SmallVector<AffineExpr, 4> affineExprs;
newShape.reserve(type.getRank());
- for (auto it : llvm::enumerate(type.getShape())) {
+ for (const auto &it : llvm::enumerate(type.getShape())) {
if (it.value() == resultTy.getDimSize(it.index())) {
newShape.push_back(it.value());
affineExprs.push_back(
@@ -1175,7 +1175,7 @@ class TransposeConverter : public OpRewritePattern<tosa::TransposeOp> {
SmallVector<AffineExpr, 2> inputExprs;
inputExprs.resize(resultTy.getRank());
auto operandTy = input.getType().cast<ShapedType>();
- for (auto permutation : llvm::enumerate(perms.getValues<APInt>())) {
+ for (const auto &permutation : llvm::enumerate(perms.getValues<APInt>())) {
auto index = permutation.index();
auto value = permutation.value().getZExtValue();
if (!operandTy.hasRank() || operandTy.isDynamicDim(index)) {
diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
index 30bad881a3194..725264d31fc7d 100644
--- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
+++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -449,7 +449,7 @@ static void convertForOp(scf::ForOp op,
llvm::DenseMap<Value, Value> &valueMapping) {
SmallVector<Value> newOperands;
SmallVector<std::pair<size_t, size_t>> argMapping;
- for (auto operand : llvm::enumerate(op.getIterOperands())) {
+ for (const auto &operand : llvm::enumerate(op.getIterOperands())) {
auto it = valueMapping.find(operand.value());
if (it == valueMapping.end())
continue;
@@ -474,7 +474,7 @@ static void convertYieldOp(scf::YieldOp op,
OpBuilder b(op);
auto loop = cast<scf::ForOp>(op->getParentOp());
auto yieldOperands = llvm::to_vector<4>(op.getOperands());
- for (auto operand : llvm::enumerate(op.getOperands())) {
+ for (const auto &operand : llvm::enumerate(op.getOperands())) {
auto it = valueMapping.find(operand.value());
if (it == valueMapping.end())
continue;
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 062a54432cea6..0a938430a5b9c 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -497,7 +497,7 @@ class VectorShuffleOpConversion
eltType = llvmType.cast<VectorType>().getElementType();
Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
int64_t insPos = 0;
- for (auto en : llvm::enumerate(maskArrayAttr)) {
+ for (const auto &en : llvm::enumerate(maskArrayAttr)) {
int64_t extPos = en.value().cast<IntegerAttr>().getInt();
Value value = adaptor.v1();
if (extPos >= v1Dim) {
@@ -883,7 +883,8 @@ class VectorTypeCastOpConversion
desc.setOffset(rewriter, loc, zero);
// Fill size and stride descriptors in memref.
- for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
+ for (const auto &indexedSize :
+ llvm::enumerate(targetMemRefType.getShape())) {
int64_t index = indexedSize.index();
auto sizeAttr =
rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index c3c1b51294801..c89588e0b87bc 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -680,7 +680,7 @@ static void composeAffineMapAndOperands(AffineMap *map,
for (auto *container : {&dims, &syms}) {
bool isDim = (container == &dims);
auto &repls = isDim ? dimReplacements : symReplacements;
- for (auto en : llvm::enumerate(*container)) {
+ for (const auto &en : llvm::enumerate(*container)) {
Value v = en.value();
if (!v) {
assert(isDim ? !map->isFunctionOfDim(en.index())
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 579c385d653c2..ea8ce177848ef 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -940,7 +940,7 @@ static LogicalResult verify(gpu::ReturnOp returnOp) {
.attachNote(function.getLoc())
.append("return type declared here");
- for (auto pair : llvm::enumerate(
+ for (const auto &pair : llvm::enumerate(
llvm::zip(function.getType().getResults(), returnOp.operands()))) {
Type type;
Value operand;
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index ac52723484384..a2e64d9c92a13 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -54,7 +54,7 @@ static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
// Replace the leading 12 function args with the respective thread/block index
// operations. Iterate backwards since args are erased and indices change.
- for (auto indexOp : enumerate(indexOps))
+ for (const auto &indexOp : enumerate(indexOps))
map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}
@@ -174,7 +174,7 @@ static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
// Map arguments from gpu.launch region to the arguments of the gpu.func
// operation.
Block &entryBlock = outlinedFuncBody.front();
- for (auto operand : enumerate(operands))
+ for (const auto &operand : enumerate(operands))
map.map(operand.value(), entryBlock.getArgument(operand.index()));
// Clone the region of the gpu.launch operation into the gpu.func operation.
diff --git a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
index 55098a9c5b46f..f7c5ca8d5a774 100644
--- a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
@@ -89,7 +89,7 @@ static void insertCopyLoops(ImplicitLocOpBuilder &b, Value from, Value to) {
});
// Map the innermost loops to threads in reverse order.
- for (auto en :
+ for (const auto &en :
llvm::enumerate(llvm::reverse(llvm::makeArrayRef(ivs).take_back(
GPUDialect::getNumWorkgroupDimensions())))) {
Value v = en.value();
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 66157371f30c5..77f436f109ca6 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -1485,7 +1485,7 @@ static void printGlobalOp(OpAsmPrinter &p, GlobalOp op) {
// list is parsed, returns -1.
static int parseOptionalKeywordAlternative(OpAsmParser &parser,
ArrayRef<StringRef> keywords) {
- for (auto en : llvm::enumerate(keywords)) {
+ for (const auto &en : llvm::enumerate(keywords)) {
if (succeeded(parser.parseOptionalKeyword(en.value())))
return en.index();
}
diff --git a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
index 70117e533b194..ee5c6da544b09 100644
--- a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
+++ b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
@@ -103,7 +103,7 @@ LinalgDependenceGraph::buildDependenceGraph(Aliases &aliases, FuncOp f) {
LinalgDependenceGraph::LinalgDependenceGraph(Aliases &aliases,
ArrayRef<LinalgOp> ops)
: aliases(aliases), linalgOps(ops.begin(), ops.end()) {
- for (auto en : llvm::enumerate(linalgOps)) {
+ for (const auto &en : llvm::enumerate(linalgOps)) {
linalgOpPositions.insert(
std::make_pair(en.value().getOperation(), en.index()));
}
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index fe3363d7d0de9..083d8b75463a1 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1093,7 +1093,7 @@ static LogicalResult verify(PadTensorOp op) {
return op.emitError("expected the block to have ") << rank << " arguments";
// Note: the number and type of yield values are checked in the YieldOp.
- for (auto en : llvm::enumerate(block.getArgumentTypes())) {
+ for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
if (!en.value().isIndex())
return op.emitOpError("expected block argument ")
<< (en.index() + 1) << " to be an index";
@@ -1204,7 +1204,7 @@ PadTensorOp PadTensorOp::createPadHighOp(Type type, Value source, Value pad,
SmallVector<OpFoldResult, 4> low, high;
auto rankedTensorType = type.cast<RankedTensorType>();
assert(rankedTensorType.hasStaticShape());
- for (auto en : enumerate(rankedTensorType.getShape())) {
+ for (const auto &en : enumerate(rankedTensorType.getShape())) {
AffineExpr d0;
bindDims(b.getContext(), d0);
auto dimOp = b.createOrFold<tensor::DimOp>(loc, source, en.index());
@@ -1275,7 +1275,7 @@ SmallVector<Range> PadTensorOp::getIterationDomain(OpBuilder &b) {
// Initialize all the ranges to {zero, one, one}. All the `ub`s are
// overwritten.
SmallVector<Range> loopRanges(reifiedShapes[0].size(), {zero, one, one});
- for (auto ub : enumerate(reifiedShapes[0]))
+ for (const auto &ub : enumerate(reifiedShapes[0]))
loopRanges[ub.index()].size = ub.value();
return loopRanges;
}
@@ -2001,7 +2001,7 @@ struct TiledLoopInputsFolder : public OpRewritePattern<linalg::TiledLoopOp> {
// Store ids of the corresponding old and new input operands.
SmallVector<int64_t, 2> oldInputIdToNew(tiledLoop.inputs().size(),
kNoMatch);
- for (auto en : llvm::enumerate(
+ for (const auto &en : llvm::enumerate(
llvm::zip(tiledLoop.inputs(), tiledLoop.getRegionInputArgs()))) {
Value in, bbArg;
size_t index = en.index();
@@ -2215,7 +2215,7 @@ struct TiledLoopResultsFolder : public OpRewritePattern<linalg::TiledLoopOp> {
SmallVector<int64_t, 2> oldResultIdToNew(tiledLoop.getNumResults(),
kNoMatch);
SmallVector<Value, 2> resultReplacement(tiledLoop.getNumResults());
- for (auto en : llvm::enumerate(
+ for (const auto &en : llvm::enumerate(
llvm::zip(tiledLoop.outputs(), tiledLoop.getRegionOutputArgs()))) {
size_t index = en.index();
Value out = std::get<0>(en.value());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
index 32d5363843314..da01ec496bec7 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
@@ -43,7 +43,7 @@ allocateBuffersForResults(Location loc, LinalgOp linalgOp, ValueRange outputs,
// Allocate a buffer for every tensor result.
assert(linalgOp.getNumOutputs() == linalgOp->getNumResults());
- for (auto en : llvm::enumerate(linalgOp->getResultTypes())) {
+ for (const auto &en : llvm::enumerate(linalgOp->getResultTypes())) {
size_t resultIndex = en.index();
Type resultType = en.value();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index 521fdd9d2e89f..eaf95a3751a88 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -186,7 +186,7 @@ struct FoldUnitDimLoops : public OpRewritePattern<GenericOp> {
DenseSet<unsigned> unitDims;
SmallVector<unsigned, 4> unitDimsReductionLoops;
ArrayAttr iteratorTypes = genericOp.iterator_types();
- for (auto expr : enumerate(invertedMap.getResults())) {
+ for (const auto &expr : enumerate(invertedMap.getResults())) {
if (AffineDimExpr dimExpr = expr.value().dyn_cast<AffineDimExpr>())
if (dims[dimExpr.getPosition()] == 1)
unitDims.insert(expr.index());
@@ -205,7 +205,7 @@ struct FoldUnitDimLoops : public OpRewritePattern<GenericOp> {
// Compute the iterator types of the modified op by dropping the one-trip
// count loops.
SmallVector<Attribute, 4> newIteratorTypes;
- for (auto attr : llvm::enumerate(iteratorTypes)) {
+ for (const auto &attr : llvm::enumerate(iteratorTypes)) {
if (!unitDims.count(attr.index()))
newIteratorTypes.push_back(attr.value());
}
@@ -439,7 +439,7 @@ struct ReplaceUnitExtents : public OpRewritePattern<GenericOp> {
// If any result tensor has a modified shape, then add reshape to recover
// the original shape.
SmallVector<Value, 4> resultReplacements;
- for (auto result : llvm::enumerate(replacementOp.getResults())) {
+ for (const auto &result : llvm::enumerate(replacementOp.getResults())) {
unsigned index = result.index() + replacementOp.getNumInputs();
auto origResultType = genericOp.getResult(result.index()).getType();
@@ -465,7 +465,7 @@ static Optional<SmallVector<ReassociationIndices>>
getReassociationMapForFoldingUnitDims(ArrayRef<OpFoldResult> mixedSizes) {
SmallVector<ReassociationIndices> reassociation;
ReassociationIndices curr;
- for (auto it : llvm::enumerate(mixedSizes)) {
+ for (const auto &it : llvm::enumerate(mixedSizes)) {
auto dim = it.index();
auto size = it.value();
curr.push_back(dim);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 795a23d7b1d88..6fd3927c80cac 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -565,7 +565,7 @@ LogicalResult ExpansionInfo::compute(LinalgOp linalgOp,
// dimension of the original op.
SmallVector<unsigned> numExpandedDims(fusedIndexMap.getNumDims(), 1);
expandedShapeMap.resize(fusedIndexMap.getNumDims());
- for (auto resultExpr : llvm::enumerate(fusedIndexMap.getResults())) {
+ for (const auto &resultExpr : llvm::enumerate(fusedIndexMap.getResults())) {
unsigned pos = resultExpr.value().cast<AffineDimExpr>().getPosition();
AffineMap foldedDims = reassociationMaps[resultExpr.index()];
numExpandedDims[pos] = foldedDims.getNumResults();
@@ -581,7 +581,7 @@ LogicalResult ExpansionInfo::compute(LinalgOp linalgOp,
// Compute reassociation map from the original op to the expanded op.
unsigned sum = 0;
reassociation.reserve(fusedIndexMap.getNumDims());
- for (auto numFoldedDim : llvm::enumerate(numExpandedDims)) {
+ for (const auto &numFoldedDim : llvm::enumerate(numExpandedDims)) {
auto seq = llvm::seq<int64_t>(sum, sum + numFoldedDim.value());
reassociation.emplace_back(seq.begin(), seq.end());
sum += numFoldedDim.value();
@@ -861,7 +861,7 @@ struct FoldProducerReshapeOpByLinearization
if (!genericOp.hasTensorSemantics())
return failure();
SmallVector<OpOperand *> inputOperands = genericOp.getInputOperands();
- for (auto en : llvm::enumerate(inputOperands)) {
+ for (const auto &en : llvm::enumerate(inputOperands)) {
auto reshapeOp = en.value()->get().getDefiningOp<TensorReshapeOp>();
if (!reshapeOp)
continue;
@@ -976,7 +976,7 @@ struct PushExpandingReshape : public OpRewritePattern<GenericOp> {
// 1. Look for tensor_expand_shape operands and figure out save the
// dimensions merged.
SmallVector<OpOperand *> inputOperands = genericOp.getInputOperands();
- for (auto en : llvm::enumerate(inputOperands)) {
+ for (const auto &en : llvm::enumerate(inputOperands)) {
auto reshapeOp =
en.value()->get().template getDefiningOp<tensor::ExpandShapeOp>();
if (!reshapeOp)
@@ -1010,7 +1010,7 @@ struct PushExpandingReshape : public OpRewritePattern<GenericOp> {
// 2. Verify that we can merge the dimensions in the linalg and that we
// don't need to create new reshapes operands. Inserting new reshape
// operands would defeat the purpose of the transformation.
- for (auto en : llvm::enumerate(inputOperands)) {
+ for (const auto &en : llvm::enumerate(inputOperands)) {
if (en.value()->get() == newOperands[en.index()]) {
AffineMap map = genericOp.getTiedIndexingMap(en.value());
for (unsigned i : llvm::seq(unsigned(0), map.getNumResults())) {
@@ -1060,7 +1060,7 @@ struct PushExpandingReshape : public OpRewritePattern<GenericOp> {
newOp.region().begin());
// 6. Reshape the so that the type matches the uses.
SmallVector<Value> newResults;
- for (auto result : llvm::enumerate(newOp->getResults())) {
+ for (const auto &result : llvm::enumerate(newOp->getResults())) {
newResults.push_back(rewriter.create<tensor::ExpandShapeOp>(
genericOp->getLoc(), genericOp.getOutputTensorTypes()[result.index()],
result.value(), reassociation));
@@ -1407,7 +1407,7 @@ class FoldConstantBase : public OpRewritePattern<GenericOp> {
// All inputs should be constants.
int numInputs = genericOp.getNumInputs();
SmallVector<DenseIntOrFPElementsAttr> inputValues(numInputs);
- for (auto operand : llvm::enumerate(genericOp.getInputOperands())) {
+ for (const auto &operand : llvm::enumerate(genericOp.getInputOperands())) {
if (!matchPattern(operand.value()->get(),
m_Constant(&inputValues[operand.index()])))
return failure();
@@ -1712,7 +1712,7 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
continue;
modifiedOutput = true;
SmallVector<Value> dynamicDims;
- for (auto dim : llvm::enumerate(operandType.getShape())) {
+ for (const auto &dim : llvm::enumerate(operandType.getShape())) {
if (dim.value() != ShapedType::kDynamicSize)
continue;
dynamicDims.push_back(rewriter.createOrFold<tensor::DimOp>(
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 0e0bc1ad48d19..f426af01d8722 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -87,7 +87,7 @@ getShapeDefiningLoopRange(LinalgOp op, unsigned loopDepth,
LLVM_DEBUG(llvm::dbgs()
<< "getShapeDefiningLoopRange map: " << map << "\n");
SmallVector<Value, 8> shapeRanges(map.getNumResults(), nullptr);
- for (auto en : llvm::enumerate(map.getResults())) {
+ for (const auto &en : llvm::enumerate(map.getResults())) {
auto dimExpr = en.value().dyn_cast<AffineDimExpr>();
if (!dimExpr)
continue;
@@ -250,7 +250,7 @@ static LinalgOp fuse(OpBuilder &b, LinalgOp producerOp, AffineMap producerMap,
LLVM_DEBUG(llvm::dbgs() << "Producer map: " << producerMap << "\n");
DenseMap<unsigned, Range> fusedLoopsAndRanges;
Value shapedOperand = consumerOpOperand.get();
- for (auto en : llvm::enumerate(producerMap.getResults())) {
+ for (const auto &en : llvm::enumerate(producerMap.getResults())) {
unsigned posInProducerLoop = en.value().cast<AffineDimExpr>().getPosition();
fusedLoopsAndRanges[posInProducerLoop] = getRangeFromOperandShape(
b, consumerOpOperand.getOwner()->getLoc(), shapedOperand, en.index());
@@ -521,7 +521,7 @@ mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
static AffineMap pruneReductionDimsFromMap(ArrayRef<Attribute> iteratorTypes,
AffineMap map) {
llvm::SmallDenseSet<unsigned> projectedDims;
- for (auto attr : llvm::enumerate(iteratorTypes)) {
+ for (const auto &attr : llvm::enumerate(iteratorTypes)) {
if (!isParallelIterator(attr.value()))
projectedDims.insert(attr.index());
}
@@ -810,7 +810,7 @@ fuseOperations(OpBuilder &b, LinalgOp rootOp, TiledLinalgOp tiledLinalgOp,
SmallVector<LinalgOp, 1> fusedOps(fusionCandidates.size());
DenseMap<Operation *, LinalgOp> origOpToFusedOp;
origOpToFusedOp[rootOp.getOperation()] = tiledOp;
- for (auto candidate : enumerate(llvm::reverse(fusionCandidates))) {
+ for (const auto &candidate : enumerate(llvm::reverse(fusionCandidates))) {
LinalgOp origOp = candidate.value();
LinalgOp fusedOp = fuse(b, origOp, fusedLoopsAndRanges);
origOpToFusedOp[origOp.getOperation()] = fusedOp;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
index c171de854880b..6bdcc192e27aa 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
@@ -42,7 +42,7 @@ static SmallVector<int64_t> getTiledSliceDims(OpOperand *consumerOperand,
// Search the slice dimensions tiled by a tile loop dimension.
DenseSet<int64_t> tiledSliceDimIndices;
- for (auto en : enumerate(indexingMap.getResults())) {
+ for (const auto &en : enumerate(indexingMap.getResults())) {
for (auto tiledLoopDim : tiledLoopDims) {
if (en.value().isFunctionOfDim(tiledLoopDim))
tiledSliceDimIndices.insert(en.index());
@@ -304,7 +304,7 @@ LogicalResult TileLoopNest::tileRootOp(OpBuilder &b,
// Update the root operation and append the loops and tile loop dimensions.
rootOp = tiledRootOp->op;
tileLoopOps.append(tiledRootOp->loops.begin(), tiledRootOp->loops.end());
- for (auto en : enumerate(tileSizes)) {
+ for (const auto &en : enumerate(tileSizes)) {
// Copy only the tiled loop dimensions with non-zero tile size.
if (en.value() == 0)
continue;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
index 97b586cdf762d..d8875663487d1 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
@@ -346,7 +346,7 @@ void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) {
changed = false;
func.walk([&](scf::ForOp forOp) {
Operation *yield = forOp.getBody()->getTerminator();
- for (auto it : llvm::enumerate(forOp.getRegionIterArgs())) {
+ for (const auto &it : llvm::enumerate(forOp.getRegionIterArgs())) {
OpOperand &ret = yield->getOpOperand(it.index());
HoistableWrite write =
getLoopInvariantTransferWriteOpDefining(forOp, ret);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 6a9fb2189855f..d3936eb366cc5 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -277,7 +277,7 @@ struct TiledLoopToSCFPattern : public OpRewritePattern<TiledLoopOp> {
// Collect loop control parameters for parallel and sequential dimensions.
SmallVector<Value, 3> seqLBs, seqUBs, seqSteps, seqIVs;
SmallVector<Value, 3> parLBs, parUBs, parSteps, parIVs;
- for (auto en : llvm::enumerate(
+ for (const auto &en : llvm::enumerate(
llvm::zip(tiledLoop.lowerBound(), tiledLoop.upperBound(),
tiledLoop.step(), tiledLoop.getInductionVars()))) {
Value lb, ub, step, iv;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 7fa2aed8dfd8d..fb281b319f67c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -87,7 +87,7 @@ defaultAllocBufferCallBack(const LinalgPromotionOptions &options,
auto one = b.createOrFold<arith::ConstantIndexOp>(1);
Value allocSize = one;
- for (auto size : llvm::enumerate(boundingSubViewSize))
+ for (const auto &size : llvm::enumerate(boundingSubViewSize))
allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
layout, alignment);
@@ -219,7 +219,7 @@ FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
SmallVector<OpFoldResult> partialSizes;
fullSizes.reserve(rank);
partialSizes.reserve(rank);
- for (auto en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
+ for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
auto rangeValue = en.value();
// Try to extract a tight constant.
LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index b4d2860101fd2..cb2987973ea51 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -181,7 +181,7 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes);
SmallVector<Attribute, 4> iteratorTypes;
- for (auto attr :
+ for (const auto &attr :
enumerate(op.iterator_types().cast<ArrayAttr>().getValue())) {
if (loopIndexToRangeIndex.count(attr.index()))
iteratorTypes.push_back(attr.value());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 6d2af0c1cece6..8156c5d45744c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -194,7 +194,7 @@ static LogicalResult padOperandToSmallestStaticBoundingBox(
SmallVector<int64_t> staticSizes;
staticSizes.reserve(shape.size());
auto shapedOp = cast<OffsetSizeAndStrideOpInterface>(sliceOp.getOperation());
- for (auto en : enumerate(shapedOp.getMixedSizes())) {
+ for (const auto &en : enumerate(shapedOp.getMixedSizes())) {
// Skip dropped dimensions.
if (droppedDims.contains(en.index()))
continue;
@@ -269,7 +269,7 @@ linalg::rewriteAsPaddedOp(OpBuilder &b, LinalgOp opToPad,
// linalg op around because it uses the dims of the original results.
SmallVector<Value> paddedSubviewResults;
paddedSubviewResults.reserve(opToPad->getNumResults());
- for (auto en : llvm::enumerate(paddedOp->getResults())) {
+ for (const auto &en : llvm::enumerate(paddedOp->getResults())) {
Value paddedResult = en.value();
int64_t resultNumber = en.index();
int64_t rank = paddedResult.getType().cast<RankedTensorType>().getRank();
@@ -443,7 +443,7 @@ LogicalResult mlir::linalg::LinalgBaseTileAndFusePattern::matchAndRewrite(
// Tile the unfused loops;
SmallVector<Value, 4> unfusedLoopTileSizes;
Value zero = rewriter.create<arith::ConstantIndexOp>(op->getLoc(), 0);
- for (auto tileSize : enumerate(tileSizes)) {
+ for (const auto &tileSize : enumerate(tileSizes)) {
if (tiledAndFusedOps->fusedLoopDims.count(tileSize.index()))
unfusedLoopTileSizes.push_back(zero);
else
@@ -524,7 +524,7 @@ LogicalResult mlir::linalg::LinalgPaddingPattern::matchAndRewrite(
}
// Hoist the padding.
- for (auto en : enumerate(depths)) {
+ for (const auto &en : enumerate(depths)) {
OpOperand &opOperand = paddedOp->getOpOperand(en.index());
auto padTensorOp = opOperand.get().getDefiningOp<PadTensorOp>();
if (!padTensorOp || en.value() == 0)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index ab22ee0e0de55..5fda632b2f860 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -249,7 +249,7 @@ vectorizeLinalgYield(OpBuilder &b, Operation *op,
auto yieldOp = dyn_cast<linalg::YieldOp>(op);
if (!yieldOp)
return VectorizationResult{VectorizationStatus::Failure, nullptr};
- for (auto outputs : llvm::enumerate(yieldOp.values())) {
+ for (const auto &outputs : llvm::enumerate(yieldOp.values())) {
// TODO: Scan for an opportunity for reuse.
// TODO: use a map.
Value vectorValue = bvm.lookup(outputs.value());
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index a54582493a36b..a197c141403b6 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -169,7 +169,7 @@ Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim) {
SmallVector<Value, 4> getDynOperands(Location loc, Value val, OpBuilder &b) {
SmallVector<Value, 4> dynOperands;
auto shapedType = val.getType().cast<ShapedType>();
- for (auto dim : llvm::enumerate(shapedType.getShape())) {
+ for (const auto &dim : llvm::enumerate(shapedType.getShape())) {
if (dim.value() == ShapedType::kDynamicSize)
dynOperands.push_back(createOrFoldDimOp(b, loc, val, dim.index()));
}
@@ -310,7 +310,7 @@ tensor::ExtractSliceOp makeComposedExtractSliceOp(
SmallVector<OpFoldResult> foldedOffsets(offsets.begin(), offsets.end());
AffineExpr dim1, dim2;
bindDims(b.getContext(), dim1, dim2);
- for (auto en : enumerate(producerOp.getMixedOffsets())) {
+ for (const auto &en : enumerate(producerOp.getMixedOffsets())) {
SmallVector<Value> offsetValues = {
getValueOrCreateConstantIndexOp(b, loc, foldedOffsets[en.index()]),
getValueOrCreateConstantIndexOp(b, loc, en.value())};
@@ -403,7 +403,7 @@ void GenerateLoopNest<scf::ForOp>::doit(
if (distributionOptions.hasValue()) {
// Collect loop ranges for parallel dimensions.
SmallVector<Range, 2> parallelLoopRanges;
- for (auto iteratorType : enumerate(iteratorTypes))
+ for (const auto &iteratorType : enumerate(iteratorTypes))
if (isParallelIterator(iteratorType.value()))
parallelLoopRanges.push_back(loopRanges[iteratorType.index()]);
@@ -435,7 +435,7 @@ void GenerateLoopNest<scf::ForOp>::doit(
// Filter out scf.for loops that were created out of parallel dimensions.
SmallVector<scf::ForOp, 4> loops;
- for (auto iteratorType : enumerate(iteratorTypes))
+ for (const auto &iteratorType : enumerate(iteratorTypes))
if (isParallelIterator(iteratorType.value()))
loops.push_back(loopNest.loops[iteratorType.index()]);
@@ -677,7 +677,7 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
distributionMethod.assign(distributionOptions->distributionMethod.begin(),
distributionOptions->distributionMethod.end());
SmallVector<Range, 2> parallelLoopRanges;
- for (auto iteratorType : enumerate(iteratorTypes)) {
+ for (const auto &iteratorType : enumerate(iteratorTypes)) {
if (isParallelIterator(iteratorType.value()))
parallelLoopRanges.push_back(loopRanges[iteratorType.index()]);
}
@@ -686,7 +686,7 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
SmallVector<ProcInfo, 2> procInfo =
options.procInfo(b, loc, parallelLoopRanges);
unsigned index = 0;
- for (auto iteratorType : enumerate(iteratorTypes)) {
+ for (const auto &iteratorType : enumerate(iteratorTypes)) {
if (index >= procInfo.size())
break;
if (isParallelIterator(iteratorType.value())) {
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index d2f989b561e44..ced119aea1a46 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -395,7 +395,7 @@ bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
};
if (!checkCompatible(aOffset, bOffset))
return false;
- for (auto aStride : enumerate(aStrides))
+ for (const auto &aStride : enumerate(aStrides))
if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
return false;
}
@@ -515,7 +515,7 @@ computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
if (originalType.getRank() == reducedType.getRank())
return unusedDims;
- for (auto dim : llvm::enumerate(sizes))
+ for (const auto &dim : llvm::enumerate(sizes))
if (auto attr = dim.value().dyn_cast<Attribute>())
if (attr.cast<IntegerAttr>().getInt() == 1)
unusedDims.insert(dim.index());
@@ -1851,7 +1851,7 @@ static MemRefType getCanonicalSubViewResultType(
if (!unusedDims)
return nullptr;
SmallVector<int64_t> shape;
- for (auto sizes : llvm::enumerate(nonRankReducedType.getShape())) {
+ for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) {
if (unusedDims->count(sizes.index()))
continue;
shape.push_back(sizes.value());
@@ -1903,7 +1903,7 @@ static bool isTrivialSubViewOp(SubViewOp subViewOp) {
// Check all size values are static and matches the (static) source shape.
ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
- for (auto size : llvm::enumerate(mixedSizes)) {
+ for (const auto &size : llvm::enumerate(mixedSizes)) {
Optional<int64_t> intValue = getConstantIntValue(size.value());
if (!intValue || intValue.getValue() != sourceShape[size.index()])
return false;
@@ -2040,7 +2040,7 @@ static MemRefType inferTransposeResultType(MemRefType memRefType,
auto originalSizes = memRefType.getShape();
// Compute permuted sizes.
SmallVector<int64_t, 4> sizes(rank, 0);
- for (auto en : llvm::enumerate(permutationMap.getResults()))
+ for (const auto &en : llvm::enumerate(permutationMap.getResults()))
sizes[en.index()] =
originalSizes[en.value().cast<AffineDimExpr>().getPosition()];
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index 0b4346ddd08d1..1f004c6c99507 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -249,7 +249,7 @@ verifyScheduleModifiers(OpAsmParser &parser,
SmallVectorImpl<SmallString<12>> &modifiers) {
if (modifiers.size() > 2)
return parser.emitError(parser.getNameLoc()) << " unexpected modifier(s)";
- for (auto mod : modifiers) {
+ for (const auto &mod : modifiers) {
// Translate the string. If it has no value, then it was not a valid
// modifier!
auto symbol = symbolizeScheduleModifier(mod);
diff --git a/mlir/lib/Dialect/PDL/IR/PDL.cpp b/mlir/lib/Dialect/PDL/IR/PDL.cpp
index b9e5415dadcc9..2a399ec2169ee 100644
--- a/mlir/lib/Dialect/PDL/IR/PDL.cpp
+++ b/mlir/lib/Dialect/PDL/IR/PDL.cpp
@@ -198,7 +198,7 @@ static LogicalResult verifyResultTypesAreInferrable(OperationOp op,
return success();
// Otherwise, make sure each of the types can be inferred.
- for (auto it : llvm::enumerate(resultTypes)) {
+ for (const auto &it : llvm::enumerate(resultTypes)) {
Operation *resultTypeOp = it.value().getDefiningOp();
assert(resultTypeOp && "expected valid result type operation");
diff --git a/mlir/lib/Dialect/SCF/SCF.cpp b/mlir/lib/Dialect/SCF/SCF.cpp
index 1ca7e49c5a3a2..dd47a55fe6b99 100644
--- a/mlir/lib/Dialect/SCF/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/SCF.cpp
@@ -1247,7 +1247,7 @@ struct RemoveUnusedResults : public OpRewritePattern<IfOp> {
// Replace the operation by the new one.
SmallVector<Value, 4> repResults(op.getNumResults());
- for (auto en : llvm::enumerate(usedResults))
+ for (const auto &en : llvm::enumerate(usedResults))
repResults[en.value().getResultNumber()] = newOp.getResult(en.index());
rewriter.replaceOp(op, repResults);
return success();
@@ -1296,7 +1296,8 @@ struct ConvertTrivialIfToSelect : public OpRewritePattern<IfOp> {
SmallVector<Value> results(op->getNumResults());
assert(thenYieldArgs.size() == results.size());
assert(elseYieldArgs.size() == results.size());
- for (auto it : llvm::enumerate(llvm::zip(thenYieldArgs, elseYieldArgs))) {
+ for (const auto &it :
+ llvm::enumerate(llvm::zip(thenYieldArgs, elseYieldArgs))) {
Value trueVal = std::get<0>(it.value());
Value falseVal = std::get<1>(it.value());
if (trueVal == falseVal)
@@ -1564,7 +1565,7 @@ struct CombineIfs : public OpRewritePattern<IfOp> {
SmallVector<Value> prevValues;
SmallVector<Value> nextValues;
- for (auto pair : llvm::enumerate(combinedIf.getResults())) {
+ for (const auto &pair : llvm::enumerate(combinedIf.getResults())) {
if (pair.index() < prevIf.getNumResults())
prevValues.push_back(pair.value());
else
@@ -2368,7 +2369,7 @@ struct WhileUnusedResult : public OpRewritePattern<WhileOp> {
SmallVector<Type> newResultTypes;
SmallVector<Value> newTermArgs;
bool needUpdate = false;
- for (auto it :
+ for (const auto &it :
llvm::enumerate(llvm::zip(op.getResults(), afterArgs, termArgs))) {
auto i = static_cast<unsigned>(it.index());
Value result = std::get<0>(it.value());
@@ -2403,7 +2404,7 @@ struct WhileUnusedResult : public OpRewritePattern<WhileOp> {
// null).
SmallVector<Value> newResults(op.getNumResults());
SmallVector<Value> newAfterBlockArgs(op.getNumResults());
- for (auto it : llvm::enumerate(newResultsIndices)) {
+ for (const auto &it : llvm::enumerate(newResultsIndices)) {
newResults[it.value()] = newWhile.getResult(it.index());
newAfterBlockArgs[it.value()] = newAfterBlock.getArgument(it.index());
}
diff --git a/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp b/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp
index a3f3070440522..d74b5d0457ccf 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp
@@ -69,7 +69,7 @@ struct ForLoopLoweringPattern : public OpRewritePattern<ForOp> {
// Rewrite uses of the for-loop block arguments to the new while-loop
// "after" arguments
- for (auto barg : enumerate(forOp.getBody(0)->getArguments()))
+ for (const auto &barg : enumerate(forOp.getBody(0)->getArguments()))
barg.value().replaceAllUsesWith(afterBlock->getArgument(barg.index()));
// Inline for-loop body operations into 'after' region.
@@ -87,7 +87,7 @@ struct ForLoopLoweringPattern : public OpRewritePattern<ForOp> {
// an extra value (the induction variable escapes the loop through being
// carried in the set of iterargs). Instead, rewrite uses of the forOp
// results.
- for (auto arg : llvm::enumerate(forOp.getResults()))
+ for (const auto &arg : llvm::enumerate(forOp.getResults()))
arg.value().replaceAllUsesWith(whileOp.getResult(arg.index() + 1));
rewriter.eraseOp(forOp);
diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
index 321d953c17edf..3ef508275a76c 100644
--- a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
@@ -198,7 +198,7 @@ scf::ForOp LoopPipelinerInternal::createKernelLoop(
llvm::SmallVector<Value> newLoopArg;
// For existing loop argument initialize them with the right version from the
// prologue.
- for (auto retVal :
+ for (const auto &retVal :
llvm::enumerate(forOp.getBody()->getTerminator()->getOperands())) {
Operation *def = retVal.value().getDefiningOp();
assert(def && "Only support loop carried dependencies of distance 1");
@@ -245,7 +245,7 @@ void LoopPipelinerInternal::createKernel(
rewriter.setInsertionPoint(newForOp.getBody(), newForOp.getBody()->begin());
BlockAndValueMapping mapping;
mapping.map(forOp.getInductionVar(), newForOp.getInductionVar());
- for (auto arg : llvm::enumerate(forOp.getRegionIterArgs())) {
+ for (const auto &arg : llvm::enumerate(forOp.getRegionIterArgs())) {
mapping.map(arg.value(), newForOp.getRegionIterArgs()[arg.index()]);
}
for (Operation *op : opOrder) {
@@ -325,7 +325,7 @@ void LoopPipelinerInternal::createKernel(
yieldOperands.push_back(mapping.lookupOrDefault(it.first));
}
// Map the yield operand to the forOp returned value.
- for (auto retVal :
+ for (const auto &retVal :
llvm::enumerate(forOp.getBody()->getTerminator()->getOperands())) {
Operation *def = retVal.value().getDefiningOp();
assert(def && "Only support loop carried dependencies of distance 1");
diff --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
index 998c73624ca75..8bf32ac68c7f7 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
@@ -160,8 +160,8 @@ mlir::scf::tileParallelLoop(ParallelOp op, ArrayRef<int64_t> tileSizes,
ifInbound.getThenRegion().takeBody(op.getRegion());
Block &thenBlock = ifInbound.getThenRegion().front();
b.setInsertionPointToStart(innerLoop.getBody());
- for (auto ivs : llvm::enumerate(llvm::zip(innerLoop.getInductionVars(),
- outerLoop.getInductionVars()))) {
+ for (const auto &ivs : llvm::enumerate(llvm::zip(
+ innerLoop.getInductionVars(), outerLoop.getInductionVars()))) {
auto newIndex = b.create<arith::AddIOp>(
op.getLoc(), std::get<0>(ivs.value()), std::get<1>(ivs.value()));
thenBlock.getArgument(ivs.index())
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
index 10a3ba646771f..6094ad8bf2242 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
@@ -182,7 +182,7 @@ LogicalResult ProcessInterfaceVarABI::matchAndRewrite(
auto indexType = typeConverter.getIndexType();
auto attrName = spirv::getInterfaceVarABIAttrName();
- for (auto argType : llvm::enumerate(funcOp.getType().getInputs())) {
+ for (const auto &argType : llvm::enumerate(funcOp.getType().getInputs())) {
auto abiInfo = funcOp.getArgAttrOfType<spirv::InterfaceVarABIAttr>(
argType.index(), attrName);
if (!abiInfo) {
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
index afa26650b4c44..776f022fe2604 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -235,7 +235,7 @@ getTypeNumBytes(const SPIRVTypeConverter::Options &options, Type type) {
return llvm::None;
int64_t memrefSize = -1;
- for (auto shape : enumerate(dims))
+ for (const auto &shape : enumerate(dims))
memrefSize = std::max(memrefSize, shape.value() * strides[shape.index()]);
return (offset + memrefSize) * elementSize.getValue();
@@ -557,7 +557,7 @@ FuncOpConversion::matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
return failure();
TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs());
- for (auto argType : enumerate(fnType.getInputs())) {
+ for (const auto &argType : enumerate(fnType.getInputs())) {
auto convertedType = getTypeConverter()->convertType(argType.value());
if (!convertedType)
return failure();
@@ -778,7 +778,7 @@ Value mlir::spirv::linearizeIndex(ValueRange indices, ArrayRef<int64_t> strides,
Value linearizedIndex = builder.create<spirv::ConstantOp>(
loc, integerType, IntegerAttr::get(integerType, offset));
- for (auto index : llvm::enumerate(indices)) {
+ for (const auto &index : llvm::enumerate(indices)) {
Value strideVal = builder.create<spirv::ConstantOp>(
loc, integerType,
IntegerAttr::get(integerType, strides[index.index()]));
diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index 4a415b456826f..481e191e9df4a 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -1669,7 +1669,7 @@ static LogicalResult verify(ReduceOp op) {
"ReduceOp operates on an extent tensor");
}
- for (auto type : llvm::enumerate(op.getInitVals()))
+ for (const auto &type : llvm::enumerate(op.getInitVals()))
if (block.getArgument(type.index() + 2).getType() != type.value().getType())
return op.emitOpError()
<< "type mismatch between argument " << type.index() + 2
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index f7665135b5b1f..665021b4c70d6 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -1017,7 +1017,7 @@ llvm::SmallDenseSet<unsigned> ExtractSliceOp::getDroppedDims() {
ArrayRef<int64_t> resultShape = getType().getShape();
SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
unsigned shapePos = 0;
- for (auto size : enumerate(mixedSizes)) {
+ for (const auto &size : enumerate(mixedSizes)) {
Optional<int64_t> sizeVal = getConstantIntValue(size.value());
// If the size is not 1, or if the current matched dimension of the result
// is the same static shape as the size value (which is 1), then the
@@ -1039,7 +1039,7 @@ LogicalResult ExtractSliceOp::reifyResultShapes(
SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
llvm::SmallDenseSet<unsigned> droppedDims = getDroppedDims();
Location loc = getLoc();
- for (auto size : enumerate(mixedSizes)) {
+ for (const auto &size : enumerate(mixedSizes)) {
if (droppedDims.count(size.index()))
continue;
if (auto attr = size.value().dyn_cast<Attribute>()) {
diff --git a/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp b/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp
index 9a1ca53074d01..92daab5e8b8f1 100644
--- a/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp
@@ -126,7 +126,7 @@ class ReduceMultiDimReductionRank
// 1. Separate reduction and parallel dims.
SmallVector<int64_t, 4> parallelDims, parallelShapes;
SmallVector<int64_t, 4> reductionDims, reductionShapes;
- for (auto it : llvm::enumerate(reductionMask)) {
+ for (const auto &it : llvm::enumerate(reductionMask)) {
int64_t i = it.index();
bool isReduction = it.value();
if (isReduction) {
diff --git a/mlir/lib/Dialect/Vector/VectorOps.cpp b/mlir/lib/Dialect/Vector/VectorOps.cpp
index fa608113b079a..3f83578caade3 100644
--- a/mlir/lib/Dialect/Vector/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/VectorOps.cpp
@@ -270,7 +270,7 @@ void vector::MultiDimReductionOp::build(OpBuilder &builder,
result.addTypes(targetType);
SmallVector<int64_t> reductionDims;
- for (auto en : llvm::enumerate(reductionMask))
+ for (const auto &en : llvm::enumerate(reductionMask))
if (en.value())
reductionDims.push_back(en.index());
result.addAttribute(getReductionDimsAttrName(),
@@ -615,7 +615,7 @@ static LogicalResult verify(ContractionOp op) {
// that the number of map outputs equals the rank of its associated
// vector operand.
unsigned numIterators = op.iterator_types().getValue().size();
- for (auto it : llvm::enumerate(op.indexing_maps())) {
+ for (const auto &it : llvm::enumerate(op.indexing_maps())) {
auto index = it.index();
auto map = it.value().cast<AffineMapAttr>().getValue();
if (map.getNumSymbols() != 0)
@@ -695,7 +695,7 @@ static std::vector<std::pair<int64_t, int64_t>>
getDimMap(ArrayRef<AffineMap> indexingMaps, ArrayAttr iteratorTypes,
StringRef targetIteratorTypeName, MLIRContext *context) {
std::vector<std::pair<int64_t, int64_t>> dimMap;
- for (auto it : llvm::enumerate(iteratorTypes)) {
+ for (const auto &it : llvm::enumerate(iteratorTypes)) {
auto iteratorTypeName = it.value().cast<StringAttr>().getValue();
if (iteratorTypeName != targetIteratorTypeName)
continue;
@@ -715,7 +715,7 @@ void ContractionOp::getIterationBounds(
auto resVectorType = getResultType().dyn_cast<VectorType>();
SmallVector<AffineMap, 4> indexingMaps(getIndexingMaps());
SmallVector<int64_t, 2> iterationShape;
- for (auto it : llvm::enumerate(iterator_types())) {
+ for (const auto &it : llvm::enumerate(iterator_types())) {
// Search lhs/rhs map results for 'targetExpr'.
auto targetExpr = getAffineDimExpr(it.index(), getContext());
auto iteratorTypeName = it.value().cast<StringAttr>().getValue();
@@ -738,7 +738,7 @@ void ContractionOp::getIterationIndexMap(
std::vector<DenseMap<int64_t, int64_t>> &iterationIndexMap) {
unsigned numMaps = indexing_maps().getValue().size();
iterationIndexMap.resize(numMaps);
- for (auto it : llvm::enumerate(indexing_maps())) {
+ for (const auto &it : llvm::enumerate(indexing_maps())) {
auto index = it.index();
auto map = it.value().cast<AffineMapAttr>().getValue();
for (unsigned i = 0, e = map.getNumResults(); i < e; ++i) {
@@ -933,7 +933,7 @@ static LogicalResult verify(vector::ExtractOp op) {
if (positionAttr.size() > static_cast<unsigned>(op.getVectorType().getRank()))
return op.emitOpError(
"expected position attribute of rank smaller than vector rank");
- for (auto en : llvm::enumerate(positionAttr)) {
+ for (const auto &en : llvm::enumerate(positionAttr)) {
auto attr = en.value().dyn_cast<IntegerAttr>();
if (!attr || attr.getInt() < 0 ||
attr.getInt() >= op.getVectorType().getDimSize(en.index()))
@@ -1511,7 +1511,7 @@ static LogicalResult verify(ShuffleOp op) {
return op.emitOpError("mask length mismatch");
// Verify all indices.
int64_t indexSize = v1Type.getDimSize(0) + v2Type.getDimSize(0);
- for (auto en : llvm::enumerate(maskAttr)) {
+ for (const auto &en : llvm::enumerate(maskAttr)) {
auto attr = en.value().dyn_cast<IntegerAttr>();
if (!attr || attr.getInt() < 0 || attr.getInt() >= indexSize)
return op.emitOpError("mask index #")
@@ -1621,7 +1621,7 @@ static LogicalResult verify(InsertOp op) {
(positionAttr.size() != static_cast<unsigned>(destVectorType.getRank())))
return op.emitOpError(
"expected position attribute rank to match the dest vector rank");
- for (auto en : llvm::enumerate(positionAttr)) {
+ for (const auto &en : llvm::enumerate(positionAttr)) {
auto attr = en.value().dyn_cast<IntegerAttr>();
if (!attr || attr.getInt() < 0 ||
attr.getInt() >= destVectorType.getDimSize(en.index()))
@@ -2822,7 +2822,7 @@ struct FoldExtractSliceIntoTransferRead
newIndices.push_back(getValueOrCreateConstantIndexOp(
rewriter, extractOp.getLoc(), offset));
}
- for (auto it : llvm::enumerate(xferOp.indices())) {
+ for (const auto &it : llvm::enumerate(xferOp.indices())) {
OpFoldResult offset =
extractOp.getMixedOffsets()[it.index() + rankReduced];
newIndices.push_back(rewriter.create<arith::AddIOp>(
@@ -3913,7 +3913,7 @@ static LogicalResult verify(vector::TransposeOp op) {
if (rank != size)
return op.emitOpError("transposition length mismatch: ") << size;
SmallVector<bool, 8> seen(rank, false);
- for (auto ta : llvm::enumerate(transpAttr)) {
+ for (const auto &ta : llvm::enumerate(transpAttr)) {
int64_t i = ta.value().cast<IntegerAttr>().getInt();
if (i < 0 || i >= rank)
return op.emitOpError("transposition index out of range: ") << i;
@@ -4004,7 +4004,7 @@ static LogicalResult verify(ConstantMaskOp &op) {
// result dimension size.
auto resultShape = resultType.getShape();
SmallVector<int64_t, 4> maskDimSizes;
- for (auto it : llvm::enumerate(op.mask_dim_sizes())) {
+ for (const auto &it : llvm::enumerate(op.mask_dim_sizes())) {
int64_t attrValue = it.value().cast<IntegerAttr>().getInt();
if (attrValue < 0 || attrValue > resultShape[it.index()])
return op.emitOpError(
diff --git a/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp b/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp
index 36725e03ae09e..c47ef94e2e23c 100644
--- a/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp
@@ -79,7 +79,7 @@ struct TransferReadPermutationLowering
// Apply the reverse transpose to deduce the type of the transfer_read.
ArrayRef<int64_t> originalShape = op.getVectorType().getShape();
SmallVector<int64_t> newVectorShape(originalShape.size());
- for (auto pos : llvm::enumerate(permutation)) {
+ for (const auto &pos : llvm::enumerate(permutation)) {
newVectorShape[pos.value()] = originalShape[pos.index()];
}
diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
index 3cac3302af32a..0b49ccd58b27d 100644
--- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
@@ -53,7 +53,7 @@ static Optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
static SmallVector<Attribute, 4> adjustIter(ArrayAttr iteratorTypes,
int64_t index) {
SmallVector<Attribute, 4> results;
- for (auto it : llvm::enumerate(iteratorTypes)) {
+ for (const auto &it : llvm::enumerate(iteratorTypes)) {
int64_t idx = it.index();
if (idx == index)
continue;
@@ -871,7 +871,7 @@ struct MultiReduceToContract
auto srcMap = rewriter.getMultiDimIdentityMap(reductionMask.size());
SmallVector<AffineExpr> exprs;
SmallVector<StringRef> iteratorTypes;
- for (auto isReduceDim : llvm::enumerate(reductionMask)) {
+ for (const auto &isReduceDim : llvm::enumerate(reductionMask)) {
if (!isReduceDim.value()) {
iteratorTypes.push_back(getParallelIteratorTypeName());
exprs.push_back(rewriter.getAffineDimExpr(isReduceDim.index()));
@@ -997,7 +997,7 @@ struct CombineContractBroadcast
broadcast.getVectorType().getRank() - srcType.getRank();
bool innerDimBroadcast = false;
SmallVector<AffineExpr> originalDims;
- for (auto dim : llvm::enumerate(srcType.getShape())) {
+ for (const auto &dim : llvm::enumerate(srcType.getShape())) {
if (dim.value() !=
broadcast.getVectorType().getDimSize(rankDiff + dim.index())) {
innerDimBroadcast = true;
diff --git a/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp b/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp
index 4c31164b433e2..de5b2fdcfcebb 100644
--- a/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp
+++ b/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp
@@ -52,7 +52,7 @@ sliceTransferIndices(int64_t index, ArrayRef<int64_t> originalShape,
getVectorOffset(originalShape, targetShape, index);
// Compute 'sliceIndices' by adding 'sliceOffsets[i]' to 'indices[i]'.
SmallVector<Value> slicedIndices(indices.begin(), indices.end());
- for (auto dim : llvm::enumerate(permutationMap.getResults())) {
+ for (const auto &dim : llvm::enumerate(permutationMap.getResults())) {
if (isBroadcast(dim.value()))
continue;
unsigned pos = dim.value().cast<AffineDimExpr>().getPosition();
@@ -429,7 +429,7 @@ struct ContractExtractPattern : public OpRewritePattern<vector::ExtractMapOp> {
for (unsigned i : llvm::seq(unsigned(0), affineMap.getNumResults()))
map[affineMap.getDimPosition(i)] = extract.getResultType().getDimSize(i);
SmallVector<Value, 4> extractOperands;
- for (auto it : llvm::enumerate(contract.getIndexingMaps())) {
+ for (const auto &it : llvm::enumerate(contract.getIndexingMaps())) {
// For each operands calculate the new vector type after distribution.
Value operand = contract->getOperand(it.index());
auto vecType = operand.getType().cast<VectorType>();
diff --git a/mlir/lib/IR/AffineExpr.cpp b/mlir/lib/IR/AffineExpr.cpp
index 47dcff627a336..2a3b9819b320f 100644
--- a/mlir/lib/IR/AffineExpr.cpp
+++ b/mlir/lib/IR/AffineExpr.cpp
@@ -1022,7 +1022,7 @@ static AffineExpr getSemiAffineExprFromFlatForm(ArrayRef<int64_t> flatExprs,
// as lhs/rhs, and store the indices, constant coefficient corresponding to
// the indices in `coefficients` map, and affine expression corresponding to
// in indices in `indexToExprMap` map.
- for (auto it : llvm::enumerate(localExprs)) {
+ for (const auto &it : llvm::enumerate(localExprs)) {
AffineExpr expr = it.value();
if (flatExprs[numDims + numSymbols + it.index()] == 0)
continue;
diff --git a/mlir/lib/IR/AffineMap.cpp b/mlir/lib/IR/AffineMap.cpp
index a60120637011c..ecdf8376b5fcf 100644
--- a/mlir/lib/IR/AffineMap.cpp
+++ b/mlir/lib/IR/AffineMap.cpp
@@ -121,7 +121,7 @@ bool AffineMap::isMinorIdentityWithBroadcasting(
if (getNumDims() < getNumResults())
return false;
unsigned suffixStart = getNumDims() - getNumResults();
- for (auto idxAndExpr : llvm::enumerate(getResults())) {
+ for (const auto &idxAndExpr : llvm::enumerate(getResults())) {
unsigned resIdx = idxAndExpr.index();
AffineExpr expr = idxAndExpr.value();
if (auto constExpr = expr.dyn_cast<AffineConstantExpr>()) {
@@ -168,7 +168,7 @@ bool AffineMap::isPermutationOfMinorIdentityWithBroadcasting(
getNumResults() > getNumInputs() ? getNumResults() - getNumInputs() : 0;
llvm::SmallBitVector dimFound(std::max(getNumInputs(), getNumResults()),
false);
- for (auto idxAndExpr : llvm::enumerate(getResults())) {
+ for (const auto &idxAndExpr : llvm::enumerate(getResults())) {
unsigned resIdx = idxAndExpr.index();
AffineExpr expr = idxAndExpr.value();
// Each result may be either a constant 0 (broadcast dimension) or a
@@ -675,7 +675,7 @@ AffineMap mlir::inversePermutation(AffineMap map) {
return map;
assert(map.getNumSymbols() == 0 && "expected map without symbols");
SmallVector<AffineExpr, 4> exprs(map.getNumDims());
- for (auto en : llvm::enumerate(map.getResults())) {
+ for (const auto &en : llvm::enumerate(map.getResults())) {
auto expr = en.value();
// Skip non-permutations.
if (auto d = expr.dyn_cast<AffineDimExpr>()) {
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index e965afb0feaaa..6efd384ad3cce 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -1036,7 +1036,7 @@ AffineMap mlir::makeStridedLinearLayoutMap(ArrayRef<int64_t> strides,
}
// AffineExpr for strides.
- for (auto en : llvm::enumerate(strides)) {
+ for (const auto &en : llvm::enumerate(strides)) {
auto dim = en.index();
auto stride = en.value();
assert(stride != 0 && "Invalid stride specification");
diff --git a/mlir/lib/IR/Verifier.cpp b/mlir/lib/IR/Verifier.cpp
index 840a3156f283a..bbc560d429d79 100644
--- a/mlir/lib/IR/Verifier.cpp
+++ b/mlir/lib/IR/Verifier.cpp
@@ -316,7 +316,7 @@ OperationVerifier::verifyDominanceOfContainedRegions(Operation &op,
for (Operation &op : block) {
if (isReachable) {
// Check that operands properly dominate this use.
- for (auto operand : llvm::enumerate(op.getOperands())) {
+ for (const auto &operand : llvm::enumerate(op.getOperands())) {
if (domInfo.properlyDominates(operand.value(), &op))
continue;
diff --git a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
index 26c80795c6509..d2ab30282562e 100644
--- a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
+++ b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
@@ -131,7 +131,7 @@ verifyTypesAlongAllEdges(Operation *op, Optional<unsigned> sourceNo,
<< succInputsTypes.size();
}
- for (auto typesIdx :
+ for (const auto &typesIdx :
llvm::enumerate(llvm::zip(*sourceTypes, succInputsTypes))) {
Type sourceType = std::get<0>(typesIdx.value());
Type inputType = std::get<1>(typesIdx.value());
@@ -266,7 +266,7 @@ bool mlir::insideMutuallyExclusiveRegions(Operation *a, Operation *b) {
return false;
// Compute index of region.
int64_t beginIndex = -1;
- for (auto it : llvm::enumerate(branchOp->getRegions()))
+ for (const auto &it : llvm::enumerate(branchOp->getRegions()))
if (&it.value() == begin)
beginIndex = it.index();
assert(beginIndex != -1 && "could not find region in op");
diff --git a/mlir/lib/Interfaces/InferTypeOpInterface.cpp b/mlir/lib/Interfaces/InferTypeOpInterface.cpp
index 67c9ccbaec5be..ff17ed0498bb2 100644
--- a/mlir/lib/Interfaces/InferTypeOpInterface.cpp
+++ b/mlir/lib/Interfaces/InferTypeOpInterface.cpp
@@ -189,7 +189,7 @@ LogicalResult mlir::detail::inferReturnTensorTypes(
if (failed(componentTypeFn(context, location, operands, attributes, regions,
retComponents)))
return failure();
- for (auto shapeAndType : retComponents) {
+ for (const auto &shapeAndType : retComponents) {
assert(shapeAndType.getAttribute() == nullptr && "attribute not supported");
if (shapeAndType.hasRank())
inferredReturnTypes.push_back(RankedTensorType::get(
diff --git a/mlir/lib/Reducer/ReductionTreePass.cpp b/mlir/lib/Reducer/ReductionTreePass.cpp
index 859f64a01e287..a1308f9362555 100644
--- a/mlir/lib/Reducer/ReductionTreePass.cpp
+++ b/mlir/lib/Reducer/ReductionTreePass.cpp
@@ -41,7 +41,7 @@ static void applyPatterns(Region &region,
std::vector<Operation *> opsNotInRange;
std::vector<Operation *> opsInRange;
size_t keepIndex = 0;
- for (auto op : enumerate(region.getOps())) {
+ for (const auto &op : enumerate(region.getOps())) {
int index = op.index();
if (keepIndex < rangeToKeep.size() &&
index == rangeToKeep[keepIndex].second)
diff --git a/mlir/lib/Rewrite/ByteCode.cpp b/mlir/lib/Rewrite/ByteCode.cpp
index bd98ce0c1e092..765c47b2ed0cf 100644
--- a/mlir/lib/Rewrite/ByteCode.cpp
+++ b/mlir/lib/Rewrite/ByteCode.cpp
@@ -198,9 +198,9 @@ class Generator {
maxTypeRangeMemoryIndex(maxTypeRangeMemoryIndex),
maxValueRangeMemoryIndex(maxValueRangeMemoryIndex),
maxLoopLevel(maxLoopLevel) {
- for (auto it : llvm::enumerate(constraintFns))
+ for (const auto &it : llvm::enumerate(constraintFns))
constraintToMemIndex.try_emplace(it.value().first(), it.index());
- for (auto it : llvm::enumerate(rewriteFns))
+ for (const auto &it : llvm::enumerate(rewriteFns))
externalRewriterToMemIndex.try_emplace(it.value().first(), it.index());
}
@@ -631,7 +631,7 @@ void Generator::allocateMemoryIndices(FuncOp matcherFunc,
ByteCodeLiveRange &defRange = defIt.second;
// Try to allocate to an existing index.
- for (auto existingIndexIt : llvm::enumerate(allocatedIndices)) {
+ for (const auto &existingIndexIt : llvm::enumerate(allocatedIndices)) {
ByteCodeLiveRange &existingRange = existingIndexIt.value();
if (!defRange.overlaps(existingRange)) {
existingRange.unionWith(defRange);
diff --git a/mlir/lib/Rewrite/PatternApplicator.cpp b/mlir/lib/Rewrite/PatternApplicator.cpp
index d5a98fef09e72..edaf13e575d35 100644
--- a/mlir/lib/Rewrite/PatternApplicator.cpp
+++ b/mlir/lib/Rewrite/PatternApplicator.cpp
@@ -53,7 +53,7 @@ void PatternApplicator::applyCostModel(CostModel model) {
// Apply the cost model to the bytecode patterns first, and then the native
// patterns.
if (const PDLByteCode *bytecode = frozenPatternList.getPDLByteCode()) {
- for (auto it : llvm::enumerate(bytecode->getPatterns()))
+ for (const auto &it : llvm::enumerate(bytecode->getPatterns()))
mutableByteCodeState->updatePatternBenefit(it.index(), model(it.value()));
}
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index 366a3d7ce24a8..3e06f9caf7b10 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -821,7 +821,7 @@ LogicalResult Importer::processFunction(llvm::Function *f) {
currentEntryBlock = blockList[0];
// Add function arguments to the entry block.
- for (auto kv : llvm::enumerate(f->args()))
+ for (const auto &kv : llvm::enumerate(f->args()))
instMap[&kv.value()] =
blockList[0]->addArgument(functionType.getParamType(kv.index()));
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 7f238afd2c927..404018bebe932 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -381,7 +381,7 @@ static Value getPHISourceValue(Block *current, Block *pred,
// the case branch that was taken.
if (switchOp.getDefaultDestination() == current)
return switchOp.getDefaultOperands()[index];
- for (auto i : llvm::enumerate(switchOp.getCaseDestinations()))
+ for (const auto &i : llvm::enumerate(switchOp.getCaseDestinations()))
if (i.value() == current)
return switchOp.getCaseOperands(i.index())[index];
}
diff --git a/mlir/lib/Transforms/BufferResultsToOutParams.cpp b/mlir/lib/Transforms/BufferResultsToOutParams.cpp
index 2abdccc6866c8..2d74c4085e70f 100644
--- a/mlir/lib/Transforms/BufferResultsToOutParams.cpp
+++ b/mlir/lib/Transforms/BufferResultsToOutParams.cpp
@@ -25,7 +25,7 @@ static void updateFuncOp(FuncOp func,
// Collect information about the results will become appended arguments.
SmallVector<Type, 6> erasedResultTypes;
SmallVector<unsigned, 6> erasedResultIndices;
- for (auto resultType : llvm::enumerate(functionType.getResults())) {
+ for (const auto &resultType : llvm::enumerate(functionType.getResults())) {
if (resultType.value().isa<BaseMemRefType>()) {
erasedResultIndices.push_back(resultType.index());
erasedResultTypes.push_back(resultType.value());
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index e32c54264c656..fd9bba81c4a2b 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -84,7 +84,7 @@ static bool doubleBuffer(Value oldMemRef, AffineForOp forOp) {
OpBuilder bOuter(forOp);
// Put together alloc operands for any dynamic dimensions of the memref.
SmallVector<Value, 4> allocOperands;
- for (auto dim : llvm::enumerate(oldMemRefType.getShape())) {
+ for (const auto &dim : llvm::enumerate(oldMemRefType.getShape())) {
if (dim.value() == ShapedType::kDynamicSize)
allocOperands.push_back(bOuter.createOrFold<memref::DimOp>(
forOp.getLoc(), oldMemRef, dim.index()));
diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
index a299b8c5b6603..24711b0de1322 100644
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -252,7 +252,7 @@ class OperationTransactionState {
op->setLoc(loc);
op->setAttrs(attrs);
op->setOperands(operands);
- for (auto it : llvm::enumerate(successors))
+ for (const auto &it : llvm::enumerate(successors))
op->setSuccessor(it.value(), it.index());
}
@@ -1256,7 +1256,7 @@ LogicalResult ConversionPatternRewriterImpl::remapValues(
remapped.reserve(llvm::size(values));
SmallVector<Type, 1> legalTypes;
- for (auto it : llvm::enumerate(values)) {
+ for (const auto &it : llvm::enumerate(values)) {
Value operand = it.value();
Type origType = operand.getType();
diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp
index 3eed22d8a5b42..8b2040633a1ae 100644
--- a/mlir/lib/Transforms/Utils/InliningUtils.cpp
+++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp
@@ -215,7 +215,7 @@ inlineRegionImpl(InlinerInterface &interface, Region *src, Block *inlineBlock,
} else {
// Otherwise, there were multiple blocks inlined. Add arguments to the post
// insertion block to represent the results to replace.
- for (auto resultToRepl : llvm::enumerate(resultsToReplace)) {
+ for (const auto &resultToRepl : llvm::enumerate(resultsToReplace)) {
resultToRepl.value().replaceAllUsesWith(postInsertBlock->addArgument(
regionResultTypes[resultToRepl.index()]));
}
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 1700d60a91730..6328b59d9008e 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -3353,7 +3353,7 @@ createFullTiles(MutableArrayRef<AffineForOp> inputNest,
// Add the body for the full tile loop nest.
BlockAndValueMapping operandMap;
- for (auto loopEn : llvm::enumerate(inputNest))
+ for (const auto &loopEn : llvm::enumerate(inputNest))
operandMap.map(loopEn.value().getInductionVar(),
fullTileLoops[loopEn.index()].getInductionVar());
b = OpBuilder::atBlockTerminator(fullTileLoops.back().getBody());
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index 023b1d6ed5eaf..e60e7e65cd603 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -589,7 +589,7 @@ LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
1 + blocksToMerge.size(),
SmallVector<Value, 8>(operandsToMerge.size()));
unsigned curOpIndex = 0;
- for (auto it : llvm::enumerate(operandsToMerge)) {
+ for (const auto &it : llvm::enumerate(operandsToMerge)) {
unsigned nextOpOffset = it.value().first - curOpIndex;
curOpIndex = it.value().first;
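
For readers skimming the hunks above, here is a minimal standalone sketch of the pattern every one of them applies (illustrative only, not part of the commit; it assumes an LLVM development environment where llvm/ADT/STLExtras.h is available, and the function name totalLength is made up for the example):

#include "llvm/ADT/STLExtras.h"
#include <cstddef>
#include <string>
#include <vector>

// The element yielded by llvm::enumerate() is a small pair-like object whose
// value() refers back into the underlying range. Taking the loop variable by
// value copies that object on every iteration, which is what clang-tidy's
// performance-for-range-copy check flags; binding it by const reference, as
// the hunks above do, avoids the copy without changing behavior for
// read-only uses.
std::size_t totalLength(const std::vector<std::string> &names) {
  std::size_t total = 0;
  // Before: for (auto en : llvm::enumerate(names))        -- copies en each time
  // After:  for (const auto &en : llvm::enumerate(names)) -- binds to the element
  for (const auto &en : llvm::enumerate(names)) {
    // en.index() is the position in the range; en.value() refers to
    // names[en.index()] without copying the string.
    total += en.value().size();
  }
  return total;
}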