[Mlir-commits] [mlir] [mlir][NFC] Avoid using braced initializer lists to call a constructor. (PR #123714)
llvmlistbot at llvm.org
Tue Jan 21 00:52:27 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-mlir
Author: Han-Chung Wang (hanhanW)
Per the LLVM style guide, we prefer not to use braced initializer lists to call a constructor. We also prefer placing an equals sign before the open curly brace when initializing a variable with a braced initializer list.
See https://llvm.org/docs/CodingStandards.html#do-not-use-braced-initializer-lists-to-call-a-constructor for more details.
The style guide does not explain the rationale in depth. An article from Abseil mentions a few of the benefits, e.g., avoiding the most vexing parse. See https://abseil.io/tips/88 for more details.
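To illustrate, here is a minimal sketch of the two patterns this diff applies (the function and variable names are illustrative, not taken from the changed files): an `=` before the braces when spelling out an explicit element list, and parentheses when constructing from another range.

```cpp
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

void example(llvm::ArrayRef<int64_t> shape) {
  // Explicit element list: put an `=` before the braces so it reads as
  // initialization rather than a constructor call.
  llvm::SmallVector<int64_t> strides = {1, 1, 1};

  // Copying from an existing range: use parentheses, which cannot be
  // mistaken for an initializer list.
  llvm::SmallVector<int64_t> copy(shape);
}
```

With braces, a reader cannot tell at a glance whether `copy` would hold a single element or a copy of the whole range; the parentheses make the constructor call unambiguous.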
---
Full diff: https://github.com/llvm/llvm-project/pull/123714.diff
13 Files Affected:
- (modified) mlir/lib/Bindings/Python/IRAttributes.cpp (+1-1)
- (modified) mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp (+1-1)
- (modified) mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp (+2-2)
- (modified) mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp (+1-1)
- (modified) mlir/lib/Dialect/ArmNeon/Transforms/LowerContractionToSMMLAPattern.cpp (+2-2)
- (modified) mlir/lib/Dialect/Linalg/TransformOps/GPUHeuristics.cpp (+1-1)
- (modified) mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp (+3-3)
- (modified) mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp (+1-1)
- (modified) mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp (+13-14)
- (modified) mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp (+3-3)
- (modified) mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp (+1-1)
- (modified) mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp (+2-2)
- (modified) mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp (+2-2)
``````````diff
diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp
index 08f7d4881e137b..7bc21a31c3c84c 100644
--- a/mlir/lib/Bindings/Python/IRAttributes.cpp
+++ b/mlir/lib/Bindings/Python/IRAttributes.cpp
@@ -845,7 +845,7 @@ class PyDenseElementsAttribute
}
shapedType = *explicitType;
} else {
- SmallVector<int64_t> shape{static_cast<int64_t>(numAttributes)};
+ SmallVector<int64_t> shape = {static_cast<int64_t>(numAttributes)};
shapedType = mlirRankedTensorTypeGet(
shape.size(), shape.data(),
mlirAttributeGetType(pyTryCast<PyAttribute>(attributes[0])),
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index a183c27abf62ae..f97e0ff1e30ea7 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -1476,7 +1476,7 @@ class MaterializeResizeBroadcast : public OpRewritePattern<tosa::ResizeOp> {
reassociationMap.push_back({});
reassociationMap.back().push_back(builder.getAffineDimExpr(3));
- llvm::SmallVector<int64_t> collapseShape{batch};
+ llvm::SmallVector<int64_t> collapseShape = {batch};
if (inputH != 1)
collapseShape.push_back(outputH);
if (inputW != 1)
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
index b7af37d293ac1c..57a5fe75a007b7 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -648,12 +648,12 @@ class FullyConnectedConverter
SmallVector<Value> filteredDims = condenseValues(dynDims);
- SmallVector<int64_t> permutation{1, 0};
+ SmallVector<int64_t> permutation = {1, 0};
auto permutationAttr = rewriter.getI64TensorAttr(permutation);
Value permutationValue =
rewriter.create<arith::ConstantOp>(loc, permutationAttr);
- SmallVector<int64_t> newWeightShape{weightShape[1], weightShape[0]};
+ SmallVector<int64_t> newWeightShape = {weightShape[1], weightShape[0]};
Type newWeightTy =
RankedTensorType::get(newWeightShape, weightTy.getElementType());
diff --git a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
index d3229d2e912966..dc4ee4e926bb46 100644
--- a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
+++ b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
@@ -182,7 +182,7 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
readOp, "Unsupported data type for tranposition");
// If load is transposed, get the base shape for the tensor descriptor.
- SmallVector<int64_t> descShape{vecTy.getShape()};
+ SmallVector<int64_t> descShape(vecTy.getShape());
if (isTransposeLoad)
std::reverse(descShape.begin(), descShape.end());
auto descType = xegpu::TensorDescType::get(
diff --git a/mlir/lib/Dialect/ArmNeon/Transforms/LowerContractionToSMMLAPattern.cpp b/mlir/lib/Dialect/ArmNeon/Transforms/LowerContractionToSMMLAPattern.cpp
index 2c0c84d055f592..2a1271dfd6bdff 100644
--- a/mlir/lib/Dialect/ArmNeon/Transforms/LowerContractionToSMMLAPattern.cpp
+++ b/mlir/lib/Dialect/ArmNeon/Transforms/LowerContractionToSMMLAPattern.cpp
@@ -126,8 +126,8 @@ class LowerContractionToSMMLAPattern
loc, op.getResultType(), rewriter.getZeroAttr(op.getResultType()));
SmallVector<int64_t> unrolledSize = *op.getShapeForUnroll();
- SmallVector<int64_t> smmlaShape{2, 8};
- SmallVector<int64_t> loopOrder{0, 1};
+ SmallVector<int64_t> smmlaShape = {2, 8};
+ SmallVector<int64_t> loopOrder = {0, 1};
if (unrolledSize.size() == 3) {
smmlaShape.insert(smmlaShape.begin(), isVecmat ? 1 : 2);
loopOrder.push_back(2);
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/GPUHeuristics.cpp b/mlir/lib/Dialect/Linalg/TransformOps/GPUHeuristics.cpp
index e6162ad97d7844..0657a87d1d1ac1 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/GPUHeuristics.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/GPUHeuristics.cpp
@@ -222,7 +222,7 @@ transform::gpu::CopyMappingInfo::inferNumThreadsImpl(
// Scale the most minor size to account for the chosen vector size and
// maximize the number of threads without exceeding the total number of
// threads.
- SmallVector<int64_t> scaledSizes{sizes};
+ SmallVector<int64_t> scaledSizes(sizes);
scaledSizes.back() /= desiredVectorSize;
if (scaledSizes.back() > totalNumThreads) {
LDBG("--Too few threads given the required vector size -> FAIL");
diff --git a/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp b/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
index 57344f986480da..ed1685a9cb9e69 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
@@ -55,7 +55,7 @@ static bool validateFullTilesOnDims(linalg::LinalgOp linalgOp,
// Skip the batch dimension if present.
// Offset all dimensions accordingly.
- SmallVector<int64_t, 3> offsetDims{dims};
+ SmallVector<int64_t, 3> offsetDims(dims);
for (size_t i = 0; i < offsetDims.size(); i++)
offsetDims[i] += batchDimsOffset;
@@ -111,10 +111,10 @@ transposePackedMatmul(RewriterBase &rewriter, linalg::LinalgOp linalgOp,
// Transpose only the dimensions that need that to conform to the provided
// transpotion settings.
- SmallVector<int64_t> innerPerm{0, 1};
+ SmallVector<int64_t> innerPerm = {0, 1};
if (isInnerTransposed != transposeInnerBlocks)
innerPerm = {1, 0};
- SmallVector<int64_t> outerPerm{0, 1};
+ SmallVector<int64_t> outerPerm = {0, 1};
if (isOuterTransposed != transposeOuterBlocks)
outerPerm = {1, 0};
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp b/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
index 1294043bf3837d..bdaf1f8666b92e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
@@ -52,7 +52,7 @@ FailureOr<Operation *> transposeConv2DHelper(RewriterBase &rewriter,
FHWCConvOp op) {
// Construct a permutation of the filter tensor dimensions. For a 2D
// convolution this will be known statically as [1, 2, 3, 0].
- SmallVector<int64_t> filterPerm({1, 2, 3, 0});
+ SmallVector<int64_t> filterPerm = {1, 2, 3, 0};
// Create the type for the transposed filter tensor.
auto filter = op->getOperand(1);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 863f2280e46ce6..299bbc226dec8b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -86,8 +86,8 @@ extractConvInputSlices(RewriterBase &rewriter, Location loc, Value input,
if (isSingleChanneled) {
// Extract input slice of size {wSizeStep} @ [w + kw] for non-channeled
// convolution.
- SmallVector<int64_t> sizes{wSizeStep};
- SmallVector<int64_t> strides{1};
+ SmallVector<int64_t> sizes = {wSizeStep};
+ SmallVector<int64_t> strides = {1};
for (int64_t kw = 0; kw < kwSize; ++kw) {
for (int64_t w = 0; w < wSize; w += wSizeStep) {
result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
@@ -97,8 +97,8 @@ extractConvInputSlices(RewriterBase &rewriter, Location loc, Value input,
} else {
// Extract lhs slice of size {n, wSizeStep, c} @ [0, sw * w + dw * kw, 0]
// for channeled convolution.
- SmallVector<int64_t> sizes{nSize, wSizeStep, cSize};
- SmallVector<int64_t> strides{1, 1, 1};
+ SmallVector<int64_t> sizes = {nSize, wSizeStep, cSize};
+ SmallVector<int64_t> strides = {1, 1, 1};
for (int64_t kw = 0; kw < kwSize; ++kw) {
for (int64_t w = 0; w < wSize; w += wSizeStep) {
result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
@@ -135,8 +135,8 @@ extractConvResultSlices(RewriterBase &rewriter, Location loc, Value res,
SmallVector<Value> result;
if (isSingleChanneled) {
// Extract res slice: {wSizeStep} @ [w] for non-channeled convolution.
- SmallVector<int64_t> sizes{wSizeStep};
- SmallVector<int64_t> strides{1};
+ SmallVector<int64_t> sizes = {wSizeStep};
+ SmallVector<int64_t> strides = {1};
for (int64_t w = 0; w < wSize; w += wSizeStep) {
result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
loc, res, /*offsets=*/ArrayRef<int64_t>{w}, sizes, strides));
@@ -144,8 +144,8 @@ extractConvResultSlices(RewriterBase &rewriter, Location loc, Value res,
} else {
// Extract res slice: {n, wSizeStep, f} @ [0, w, 0] for channeled
// convolution.
- SmallVector<int64_t> sizes{nSize, wSizeStep, fSize};
- SmallVector<int64_t> strides{1, 1, 1};
+ SmallVector<int64_t> sizes = {nSize, wSizeStep, fSize};
+ SmallVector<int64_t> strides = {1, 1, 1};
for (int64_t w = 0; w < wSize; w += wSizeStep) {
result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
loc, res, /*offsets=*/ArrayRef<int64_t>{0, w, 0}, sizes, strides));
@@ -163,7 +163,7 @@ static Value insertConvResultSlices(RewriterBase &rewriter, Location loc,
if (isSingleChanneled) {
// Write back res slice: {wSizeStep} @ [w] for non-channeled convolution.
// This does not depend on kw.
- SmallVector<int64_t> strides{1};
+ SmallVector<int64_t> strides = {1};
for (int64_t w = 0; w < wSize; w += wSizeStep) {
res = rewriter.create<vector::InsertStridedSliceOp>(
loc, resVals[w], res, /*offsets=*/ArrayRef<int64_t>{w}, strides);
@@ -171,7 +171,7 @@ static Value insertConvResultSlices(RewriterBase &rewriter, Location loc,
} else {
// Write back res slice: {n, wSizeStep, f} @ [0, w, 0] for channeled
// convolution. This does not depend on kw.
- SmallVector<int64_t> strides{1, 1, 1};
+ SmallVector<int64_t> strides = {1, 1, 1};
for (int64_t w = 0; w < wSize; w += wSizeStep) {
res = rewriter.create<vector::InsertStridedSliceOp>(
loc, resVals[w], res, /*offsets=*/ArrayRef<int64_t>{0, w, 0},
@@ -3505,8 +3505,8 @@ struct Conv1DGenerator
//===------------------------------------------------------------------===//
// Unroll along kw and read slices of lhs and rhs.
SmallVector<Value> lhsVals, rhsVals, resVals;
- auto inOutSliceSizes = SmallVector<int64_t>{nSize, wSizeStep, cSize};
- auto inOutStrides = SmallVector<int64_t>{1, 1, 1};
+ SmallVector<int64_t> inOutSliceSizes = {nSize, wSizeStep, cSize};
+ SmallVector<int64_t> inOutStrides = {1, 1, 1};
// Extract lhs slice of size {n, wSizeStep, c}
// @ [0, sw * w + dw * kw, 0].
@@ -3538,8 +3538,7 @@ struct Conv1DGenerator
// Note - the scalable flags are ignored as flattening combined with
// scalable vectorization is not supported.
- auto inOutFlattenSliceSizes =
- SmallVector<int64_t>{nSize, wSizeStep * cSize};
+ SmallVector<int64_t> inOutFlattenSliceSizes = {nSize, wSizeStep * cSize};
auto lhsTypeAfterFlattening =
VectorType::get(inOutFlattenSliceSizes, lhsEltType);
auto resTypeAfterFlattening =
diff --git a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
index 3c508ed6e324b2..556922a64b0938 100644
--- a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
+++ b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
@@ -740,9 +740,9 @@ static std::tuple<SmallVector<int64_t>, SmallVector<int64_t>,
SmallVector<int64_t>>
makeVectorShapes(ArrayRef<int64_t> lhs, ArrayRef<int64_t> rhs,
ArrayRef<int64_t> res) {
- SmallVector<int64_t> vlhs{lhs};
- SmallVector<int64_t> vrhs{rhs};
- SmallVector<int64_t> vres{res};
+ SmallVector<int64_t> vlhs(lhs);
+ SmallVector<int64_t> vrhs(rhs);
+ SmallVector<int64_t> vres(res);
return std::make_tuple(vlhs, vrhs, vres);
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
index 42ac717b44c4b9..3035c419a1b565 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
@@ -557,7 +557,7 @@ struct CastAwayConstantMaskLeadingOneDim
int64_t flatLeadingSize =
std::accumulate(dimSizes.begin(), dimSizes.begin() + dropDim + 1,
static_cast<int64_t>(1), std::multiplies<int64_t>());
- SmallVector<int64_t> newDimSizes({flatLeadingSize});
+ SmallVector<int64_t> newDimSizes = {flatLeadingSize};
newDimSizes.append(dimSizes.begin() + dropDim + 1, dimSizes.end());
auto newMask = rewriter.create<vector::ConstantMaskOp>(
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 84c1deaebcd009..d9be8d0e578aea 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -930,8 +930,8 @@ struct BreakDownVectorBitCast : public OpRewritePattern<vector::BitCastOp> {
loc, elemType, rewriter.getZeroAttr(elemType));
Value res = rewriter.create<SplatOp>(loc, castDstType, zero);
- SmallVector<int64_t> sliceShape{castDstLastDim};
- SmallVector<int64_t> strides{1};
+ SmallVector<int64_t> sliceShape = {castDstLastDim};
+ SmallVector<int64_t> strides = {1};
VectorType newCastDstType =
VectorType::get(SmallVector<int64_t>{castDstLastDim / shrinkRatio},
castDstType.getElementType());
diff --git a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
index edd939eda7c599..3fc05c8cb87071 100644
--- a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
+++ b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
@@ -66,8 +66,8 @@ Value mlir::x86vector::avx2::intrin::mm256ShufflePs(ImplicitLocOpBuilder &b,
uint8_t mask) {
uint8_t b01, b23, b45, b67;
MaskHelper::extractShuffle(mask, b01, b23, b45, b67);
- SmallVector<int64_t> shuffleMask{b01, b23, b45 + 8, b67 + 8,
- b01 + 4, b23 + 4, b45 + 8 + 4, b67 + 8 + 4};
+ SmallVector<int64_t> shuffleMask = {
+ b01, b23, b45 + 8, b67 + 8, b01 + 4, b23 + 4, b45 + 8 + 4, b67 + 8 + 4};
return b.create<vector::ShuffleOp>(v1, v2, shuffleMask);
}
``````````
https://github.com/llvm/llvm-project/pull/123714
More information about the Mlir-commits mailing list