[Mlir-commits] [mlir] [mlir][tosa] Support unranked input/weight tensors for convolution ops (PR #134856)
llvmlistbot at llvm.org
Tue Apr 8 07:14:32 PDT 2025
github-actions[bot] wrote:
:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:
You can test this locally with the following command:
``````````bash
git-clang-format --diff HEAD~1 HEAD --extensions cpp -- mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
``````````
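If the suggested formatting looks right, one option (a sketch, assuming the diff applies cleanly from the repository root) is to regenerate the patch and apply it with `git apply`:

``````````bash
# Regenerate clang-format's suggestions as a git-style diff and apply them
# to the working tree. Paths are relative to the repository root.
git-clang-format --diff HEAD~1 HEAD --extensions cpp -- \
  mlir/lib/Dialect/Tosa/IR/TosaOps.cpp > /tmp/clang-format.patch
git apply /tmp/clang-format.patch
``````````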
View the diff from clang-format here.
``````````diff
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index ee4333f4b..d86aad381 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -2918,13 +2918,13 @@ LogicalResult TransposeConv2DOp::verify() {
const int64_t outPadRight = padding[3];
const auto weightType =
- llvm::dyn_cast<RankedTensorType>(getWeight().getType());
+ llvm::dyn_cast<RankedTensorType>(getWeight().getType());
if (weightType) {
const int64_t kernelHeight = weightType.getDimSize(1);
if (!ShapedType::isDynamic(kernelHeight)) {
- if (failed(checkPadAgainstKernelDim(outPadTop, kernelHeight, "out_pad_top",
- "KH")))
+ if (failed(checkPadAgainstKernelDim(outPadTop, kernelHeight,
+ "out_pad_top", "KH")))
return failure();
if (failed(checkPadAgainstKernelDim(outPadBottom, kernelHeight,
@@ -2934,8 +2934,8 @@ LogicalResult TransposeConv2DOp::verify() {
const int64_t kernelWidth = weightType.getDimSize(2);
if (!ShapedType::isDynamic(kernelWidth)) {
- if (failed(checkPadAgainstKernelDim(outPadLeft, kernelWidth, "out_pad_left",
- "KW")))
+ if (failed(checkPadAgainstKernelDim(outPadLeft, kernelWidth,
+ "out_pad_left", "KW")))
return failure();
if (failed(checkPadAgainstKernelDim(outPadRight, kernelWidth,
@@ -2961,11 +2961,11 @@ LogicalResult TransposeConv2DOp::verify() {
if (outputHeight !=
(inputHeight - 1) * strideY + outPadTop + outPadBottom + kernelHeight)
return emitOpError(
- "dimension mismatch: expected OH == (IH - 1) * stride_y "
- "+ out_pad_top + out_pad_bottom + KH, but got ")
- << outputHeight << " != (" << inputHeight << " - 1) * " << strideY
- << " + " << outPadTop << " + " << outPadBottom << " + "
- << kernelHeight;
+ "dimension mismatch: expected OH == (IH - 1) * stride_y "
+ "+ out_pad_top + out_pad_bottom + KH, but got ")
+ << outputHeight << " != (" << inputHeight << " - 1) * "
+ << strideY << " + " << outPadTop << " + " << outPadBottom
+ << " + " << kernelHeight;
}
const int64_t inputWidth = inputType.getDimSize(2);
@@ -2977,11 +2977,11 @@ LogicalResult TransposeConv2DOp::verify() {
if (outputWidth !=
(inputWidth - 1) * strideX + outPadLeft + outPadRight + kernelWidth)
return emitOpError(
- "dimension mismatch: expected OW == (IW - 1) * stride_x "
- "+ out_pad_left + out_pad_right + KW, but got ")
- << outputWidth << " != (" << inputWidth << " - 1) * " << strideX
- << " + " << outPadLeft << " + " << outPadRight << " + "
- << kernelWidth;
+ "dimension mismatch: expected OW == (IW - 1) * stride_x "
+ "+ out_pad_left + out_pad_right + KW, but got ")
+ << outputWidth << " != (" << inputWidth << " - 1) * " << strideX
+ << " + " << outPadLeft << " + " << outPadRight << " + "
+ << kernelWidth;
}
}
``````````
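For context on the error messages being re-wrapped above: the verifier enforces the standard transpose-convolution output-height relation, OH == (IH - 1) * stride_y + out_pad_top + out_pad_bottom + KH. A quick numeric sanity check, using illustrative values that are not taken from the PR:

``````````bash
# Illustrative values only (not from the PR): evaluate the OH formula
# enforced by TransposeConv2DOp::verify().
IH=8; STRIDE_Y=2; OUT_PAD_TOP=0; OUT_PAD_BOTTOM=1; KH=3
echo $(( (IH - 1) * STRIDE_Y + OUT_PAD_TOP + OUT_PAD_BOTTOM + KH ))  # prints 18, so OH must be 18
``````````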
https://github.com/llvm/llvm-project/pull/134856
More information about the Mlir-commits mailing list