[Mlir-commits] [mlir] [mlir][SVE] Add e2e for 1D depthwise WC convolution (PR #85225)
llvmlistbot at llvm.org
Thu Mar 14 06:41:23 PDT 2024
github-actions[bot] wrote:
:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:
<details>
<summary>
You can test this locally with the following command:
</summary>
``````````bash
git-clang-format --diff 8f68022f8e6e54d1aeae4ed301f5a015963089b7 02008d4a0861efe2e13348539e2c96816a7459dd -- mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
``````````
</details>
<details>
<summary>
View the diff from clang-format here.
</summary>
``````````diff
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 5f73c7d1bb..feb3b3f03c 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -460,7 +460,7 @@ LogicalResult promoteSubviewsPrecondition(Operation *op,
LogicalResult vectorizeOpPrecondition(Operation *op,
ArrayRef<int64_t> inputVectorSizes = {},
ArrayRef<bool> inputScalableVecDims = {},
- bool vectorizeNDExtract = false,
+ bool vectorizeNDExtract = false,
bool flatten1DDepthwiseConv = false);
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 00d5bb3358..c74ab1e644 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1744,7 +1744,8 @@ vectorizeDynamicConvOpPrecondition(linalg::LinalgOp conv,
}
static LogicalResult
-vectorizeDynamicLinalgOpPrecondition(linalg::LinalgOp op, bool flatten1DDepthwiseConv) {
+vectorizeDynamicLinalgOpPrecondition(linalg::LinalgOp op,
+ bool flatten1DDepthwiseConv) {
if (isa<ConvolutionOpInterface>(op.getOperation()))
return vectorizeDynamicConvOpPrecondition(op, flatten1DDepthwiseConv);
@@ -1813,11 +1814,9 @@ vectorizeUnPackOpPrecondition(tensor::UnPackOp unpackOp,
return success();
}
-static LogicalResult
-vectorizeLinalgOpPrecondition(LinalgOp linalgOp,
- ArrayRef<int64_t> inputVectorSizes,
- bool vectorizeNDExtract,
- bool flatten1DDepthwiseConv) {
+static LogicalResult vectorizeLinalgOpPrecondition(
+ LinalgOp linalgOp, ArrayRef<int64_t> inputVectorSizes,
+ bool vectorizeNDExtract, bool flatten1DDepthwiseConv) {
// tensor with dimension of 0 cannot be vectorized.
if (llvm::is_contained(linalgOp.getStaticShape(), 0))
return failure();
@@ -2015,7 +2014,8 @@ LogicalResult mlir::linalg::vectorize(RewriterBase &rewriter, Operation *op,
LLVM_DEBUG(llvm::dbgs() << "\n");
if (failed(vectorizeOpPrecondition(op, inputVectorSizes, inputScalableVecDims,
- vectorizeNDExtract, flatten1DDepthwiseConv))) {
+ vectorizeNDExtract,
+ flatten1DDepthwiseConv))) {
LDBG("Vectorization pre-conditions failed\n");
return failure();
}
@@ -3193,7 +3193,8 @@ struct Conv1DGenerator
useMasking = true;
}
- assert(!(useMasking && flatten) && "Unsupported flattened conv with dynamic shapes");
+ assert(!(useMasking && flatten) &&
+ "Unsupported flattened conv with dynamic shapes");
// out{n, w, c}
bindShapeDims(resShapedType, nSize, wSize);
``````````
</details>
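To apply these formatting suggestions locally rather than just view them, one option (a sketch, not part of the bot's message; it assumes the checkout is at the PR's head commit and that `git-clang-format` is on PATH) is to pipe the same diff into `git apply` from the repository root:

``````````bash
# Regenerate the diff shown above and apply it to the working tree.
# The two SHAs and the file list are taken verbatim from the bot's command.
git-clang-format --diff 8f68022f8e6e54d1aeae4ed301f5a015963089b7 \
    02008d4a0861efe2e13348539e2c96816a7459dd -- \
    mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h \
    mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h \
    mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp \
    mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | git apply
``````````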
https://github.com/llvm/llvm-project/pull/85225
More information about the Mlir-commits mailing list