[Mlir-commits] [mlir] [mlir][tosa] Make TOSA RESIZE's scale, offset, border as Input (PR #124956)
llvmlistbot at llvm.org
Wed Jan 29 09:20:49 PST 2025
github-actions[bot] wrote:
:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:
<details>
<summary>
You can test this locally with the following command:
</summary>
``````````bash
git-clang-format --diff 5a4945fa4d515b3209a5e181621bf828e678769f 1f08f3a035f7f65c5e91f3131eab23f631f25456 --extensions cpp,h -- mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp mlir/lib/Dialect/Tosa/IR/TosaOps.cpp mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
``````````
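(If the diff below looks right, running the same command without `--diff` should apply the fixes directly to your working tree, which is `git-clang-format`'s default mode.)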
</details>
<details>
<summary>
View the diff from clang-format here.
</summary>
``````````diff
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index adc9dc0cef..ac676055be 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -1454,9 +1454,9 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
SmallVector<int64_t> scaleInt, offsetInt, borderInt;
if (!tosa::getConstShapeValue(adaptor.getScale().getDefiningOp(), scaleInt) ||
!tosa::getConstShapeValue(adaptor.getOffset().getDefiningOp(),
- offsetInt) ||
+ offsetInt) ||
!tosa::getConstShapeValue(adaptor.getBorder().getDefiningOp(),
- borderInt)) {
+ borderInt)) {
return failure();
}
@@ -1478,8 +1478,10 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
LogicalResult tosa::ResizeOp::verify() {
const Value input = getInput();
const Value output = getOutput();
- const RankedTensorType inputType = llvm::dyn_cast<RankedTensorType>(input.getType());
- const RankedTensorType outputType = llvm::dyn_cast<RankedTensorType>(output.getType());
+ const RankedTensorType inputType =
+ llvm::dyn_cast<RankedTensorType>(input.getType());
+ const RankedTensorType outputType =
+ llvm::dyn_cast<RankedTensorType>(output.getType());
if (!inputType)
return emitOpError("expect a ranked input tensor");
@@ -1502,7 +1504,8 @@ LogicalResult tosa::ResizeOp::verify() {
}
if (llvm::any_of(scaleValues, [](int64_t s) { return s <= 0; }))
- return emitOpError("expect all scale values to be > 0, got ") << scaleValues;
+ return emitOpError("expect all scale values to be > 0, got ")
+ << scaleValues;
const int64_t scaleYN = scaleValues[0];
const int64_t scaleYD = scaleValues[1];
@@ -1515,36 +1518,42 @@ LogicalResult tosa::ResizeOp::verify() {
const int64_t borderY = borderValues[0];
const int64_t borderX = borderValues[1];
- auto idivCheck = [](const int64_t lhs, const int64_t rhs) -> std::optional<int64_t> {
+ auto idivCheck = [](const int64_t lhs,
+ const int64_t rhs) -> std::optional<int64_t> {
if (lhs % rhs != 0)
return std::nullopt;
return lhs / rhs;
};
if (ih != ShapedType::kDynamic) {
- const std::optional<int64_t> calculatedOutHeightMinusOne = idivCheck(
- (ih - 1) * scaleYN - offsetY + borderY, scaleYD);
+ const std::optional<int64_t> calculatedOutHeightMinusOne =
+ idivCheck((ih - 1) * scaleYN - offsetY + borderY, scaleYD);
if (!calculatedOutHeightMinusOne.has_value())
- return emitOpError("expected (input_height - 1) * scale_y_n - offset_y + border_y ")
- << "to be wholly divisible by scale_y_d, got ((" << ih << " - 1) * " << scaleYN
- << " - " << offsetY << " + " << borderY << ") / " << scaleYD;
+ return emitOpError("expected (input_height - 1) * scale_y_n - offset_y + "
+ "border_y ")
+ << "to be wholly divisible by scale_y_d, got ((" << ih
+ << " - 1) * " << scaleYN << " - " << offsetY << " + " << borderY
+ << ") / " << scaleYD;
const int64_t calculatedOutHeight = calculatedOutHeightMinusOne.value() + 1;
if (oh != ShapedType::kDynamic && calculatedOutHeight != oh)
return emitOpError("calculated output height did not match expected: ")
- << "calculated=" << calculatedOutHeight << ", expected=" << oh;
+ << "calculated=" << calculatedOutHeight << ", expected=" << oh;
}
if (iw != ShapedType::kDynamic) {
const int64_t scaledInWidth = (iw - 1) * scaleXN - offsetX + borderX;
- const std::optional<int64_t> calculatedOutWidthMinusOne = idivCheck(scaledInWidth, scaleXD);
+ const std::optional<int64_t> calculatedOutWidthMinusOne =
+ idivCheck(scaledInWidth, scaleXD);
if (!calculatedOutWidthMinusOne.has_value())
- return emitOpError("expected (input_width - 1) * scale_x_n - offset_x + border_x ")
- << "to be wholly divisible by scale_x_d, got ((" << iw << " - 1) * " << scaleXN
- << " - " << offsetX << " + " << borderX << ") / " << scaleXD;
+ return emitOpError("expected (input_width - 1) * scale_x_n - offset_x + "
+ "border_x ")
+ << "to be wholly divisible by scale_x_d, got ((" << iw
+ << " - 1) * " << scaleXN << " - " << offsetX << " + " << borderX
+ << ") / " << scaleXD;
const int64_t calculatedOutWidth = calculatedOutWidthMinusOne.value() + 1;
if (ow != ShapedType::kDynamic && calculatedOutWidth != ow)
return emitOpError("calculated output width did not match expected: ")
- << "calculated=" << calculatedOutWidth << ", expected=" << ow;
+ << "calculated=" << calculatedOutWidth << ", expected=" << ow;
}
return success();
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
index 5d45835002..b6a39a8635 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -530,8 +530,10 @@ bool checkErrorIfResize(Operation *op) {
if (auto resize = dyn_cast<tosa::ResizeOp>(op)) {
const Value input = resize.getInput();
const Value output = resize.getOutput();
- const RankedTensorType inputType = llvm::dyn_cast<RankedTensorType>(input.getType());
- const RankedTensorType outputType = llvm::dyn_cast<RankedTensorType>(output.getType());
+ const RankedTensorType inputType =
+ llvm::dyn_cast<RankedTensorType>(input.getType());
+ const RankedTensorType outputType =
+ llvm::dyn_cast<RankedTensorType>(output.getType());
if (!inputType || !outputType) {
op->emitOpError("expect ranked input/output tensor");
@@ -542,15 +544,12 @@ bool checkErrorIfResize(Operation *op) {
// implementations, position * stride does not overflow int32_t.
if (inputType.hasStaticShape() && outputType.hasStaticShape()) {
const SmallVector<int64_t, 4> sizes = {
- outputType.getDimSize(1),
- outputType.getDimSize(2),
- inputType.getDimSize(1),
- inputType.getDimSize(2)
- };
+ outputType.getDimSize(1), outputType.getDimSize(2),
+ inputType.getDimSize(1), inputType.getDimSize(2)};
const int64_t *maxDim = llvm::max_element(sizes);
if (maxDim != sizes.end() && *maxDim >= 16384) {
- op->emitOpError("expect input/output height/width dims to be < 16384, ") <<
- "got [OH, OW, IH, IW] = " << sizes;
+ op->emitOpError("expect input/output height/width dims to be < 16384, ")
+ << "got [OH, OW, IH, IW] = " << sizes;
return false;
}
}
@@ -567,14 +566,15 @@ bool checkErrorIfResize(Operation *op) {
// Ensure scale values don't overflow int32 accumulator
if (scaleYN > (1 << 11) || scaleXN > (1 << 11)) {
- op->emitOpError("expect all scale numerator values to be <= (1 << 11), got scale_y_n=") << scaleYN
- << ", scale_x_n=" << scaleXN;
+ op->emitOpError("expect all scale numerator values to be <= (1 << 11), "
+ "got scale_y_n=")
+ << scaleYN << ", scale_x_n=" << scaleXN;
return false;
}
if (scaleYD >= 16 * scaleYN || scaleXD >= 16 * scaleXN) {
op->emitOpError("expect a downscale ratio larger than 1/16, got y=")
- << scaleYN << "/" << scaleYD << ", x=" << scaleXN << "/" << scaleXD;
+ << scaleYN << "/" << scaleYD << ", x=" << scaleXN << "/" << scaleXD;
return false;
}
@@ -590,25 +590,30 @@ bool checkErrorIfResize(Operation *op) {
const int64_t borderY = border[0];
const int64_t borderX = border[1];
- // Set a consistent lower limit of 1/16 downscale to simplify implementations
+ // Set a consistent lower limit of 1/16 downscale to simplify
+ // implementations
if (offsetY < -scaleYN || offsetY >= 16 * scaleYN) {
- op->emitOpError("expect offsetY / scaleYNumerator to be in range [-1, 16), got ")
- << offsetY << "/" << scaleYN;
+ op->emitOpError(
+ "expect offsetY / scaleYNumerator to be in range [-1, 16), got ")
+ << offsetY << "/" << scaleYN;
return false;
}
if (offsetX < -scaleXN || offsetX >= 16 * scaleXN) {
- op->emitOpError("expect offsetX / scaleXNumerator to be in range [-1, 16), got ")
- << offsetX << "/" << scaleXN;
+ op->emitOpError(
+ "expect offsetX / scaleXNumerator to be in range [-1, 16), got ")
+ << offsetX << "/" << scaleXN;
return false;
}
if (borderY < -16 * scaleYN || borderY >= scaleYN) {
- op->emitOpError("expect borderY / scaleYNumerator to be in range [-16, 1), got ")
- << borderY << "/" << scaleYN;
+ op->emitOpError(
+ "expect borderY / scaleYNumerator to be in range [-16, 1), got ")
+ << borderY << "/" << scaleYN;
return false;
}
if (borderX < -16 * scaleXN || borderX >= scaleXN) {
- op->emitOpError("expect borderX / scaleXNumerator to be in range [-16, 1), got ")
- << borderX << "/" << scaleXN;
+ op->emitOpError(
+ "expect borderX / scaleXNumerator to be in range [-16, 1), got ")
+ << borderX << "/" << scaleXN;
return false;
}
}
``````````
</details>
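For readers skimming the patch: the reflowed hunks in `ResizeOp::verify()` above all enforce a single output-size rule, namely that `(in_dim - 1) * scale_n - offset + border` must be wholly divisible by `scale_d`, with the quotient plus one matching the static output dimension. Below is a minimal standalone sketch of that rule, assuming nothing beyond the diff itself; the function name and sample values are illustrative, not part of the patch.

``````````cpp
#include <cassert>
#include <cstdint>
#include <optional>

// Mirrors the idivCheck logic in ResizeOp::verify() from the diff above:
// returns the expected output dimension, or nullopt when the scaled extent
// is not wholly divisible by the scale denominator (a verifier error).
std::optional<int64_t> expectedOutDim(int64_t in, int64_t scaleN,
                                      int64_t scaleD, int64_t offset,
                                      int64_t border) {
  const int64_t scaled = (in - 1) * scaleN - offset + border;
  if (scaled % scaleD != 0)
    return std::nullopt;
  return scaled / scaleD + 1;
}

int main() {
  // A height-16 input upscaled by 4/2 with zero offset/border -> height 31.
  assert(expectedOutDim(16, 4, 2, 0, 0) == 31);
  // (10 - 1) * 3 = 27 is not divisible by 2 -> the verifier would reject.
  assert(!expectedOutDim(10, 3, 2, 0, 0).has_value());
}
``````````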
https://github.com/llvm/llvm-project/pull/124956