[Mlir-commits] [mlir] [mlir][tosa] Make TOSA RESIZE's scale, offset, border as Input (PR #124956)
llvmlistbot at llvm.org
Wed Jan 29 09:16:49 PST 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-mlir-tosa
Author: Hsiangkai Wang (Hsiangkai)
<details>
<summary>Changes</summary>
Move the `scale`, `offset`, and `border` parameters of the RESIZE operator in the MLIR TOSA dialect from attributes to inputs and update the lit tests accordingly.
Add a verifier for the `tosa::ResizeOp` operation.
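After this change the three parameters are expected to arrive as `!tosa.shape` operands (typically produced by `tosa.const_shape`) instead of dense array attributes. A minimal sketch of the new form — the tensor shapes and the scale/offset/border values are illustrative and the exact assembly may differ slightly from the lit tests in this patch:

```mlir
// scale = [scale_y_n, scale_y_d, scale_x_n, scale_x_d], offset/border = [y, x]
%scale  = tosa.const_shape {value = dense<[2, 1, 2, 1]> : tensor<4xindex>} : !tosa.shape<4>
%offset = tosa.const_shape {value = dense<[0, 0]> : tensor<2xindex>} : !tosa.shape<2>
%border = tosa.const_shape {value = dense<[1, 1]> : tensor<2xindex>} : !tosa.shape<2>
%out = tosa.resize %in, %scale, %offset, %border {mode = "BILINEAR"}
         : (tensor<1x16x16x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x32x32x8xf32>
```

The new verifier reads these constant values and checks that the result dimensions satisfy `output_height = ((input_height - 1) * scale_y_n - offset_y + border_y) / scale_y_d + 1` (and the analogous relation for the width), so the `1x16x16x8` input with a 2/1 scale and a border of 1 yields the `1x32x32x8` result above.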
---
Patch is 56.48 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/124956.diff
13 Files Affected:
- (modified) mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td (+4-3)
- (modified) mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h (+3)
- (modified) mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp (+12-105)
- (modified) mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp (+16-3)
- (modified) mlir/lib/Dialect/Tosa/IR/TosaOps.cpp (+83-3)
- (modified) mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp (+111-5)
- (modified) mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp (+18)
- (modified) mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-resize.mlir (+65-75)
- (modified) mlir/test/Dialect/Tosa/canonicalize.mlir (+8-2)
- (modified) mlir/test/Dialect/Tosa/invalid.mlir (+60)
- (modified) mlir/test/Dialect/Tosa/level_check.mlir (+14-8)
- (modified) mlir/test/Dialect/Tosa/ops.mlir (+4-1)
- (modified) mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir (+32-8)
``````````diff
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index 850b85236a4c7f..54f9033c585f14 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -1853,9 +1853,9 @@ def Tosa_ResizeOp : Tosa_InferShapedTypeOp<"resize"> {
let arguments = (ins
Tosa_Tensor4D:$input,
- Tosa_IntArrayAttr4:$scale,
- Tosa_IntArrayAttr2:$offset,
- Tosa_IntArrayAttr2:$border,
+ Rank4TosaShape:$scale,
+ Rank2TosaShape:$offset,
+ Rank2TosaShape:$border,
Tosa_ResizeTypeAttr:$mode
);
@@ -1864,6 +1864,7 @@ def Tosa_ResizeOp : Tosa_InferShapedTypeOp<"resize"> {
);
let hasFolder = 1;
+ let hasVerifier = 1;
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h
index 78a8828855437e..85eb467a71a3a6 100644
--- a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h
+++ b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h
@@ -237,6 +237,9 @@ SmallVector<int64_t> convertFromMlirShape(ArrayRef<int64_t> shape);
bool getConstShapeValue(Operation *op,
llvm::SmallVector<int64_t> &result_shape);
+// returns a small vector of int64_t values that attr contains
+SmallVector<int64_t> convertFromIntAttr(const DenseElementsAttr &attr,
+ const int rank);
} // namespace tosa
} // namespace mlir
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index b0eb2d6cbc30b6..1ff9d291005274 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -1378,7 +1378,10 @@ class ResizeUnaryConverter : public OpRewritePattern<tosa::ResizeOp> {
return success();
}
- ArrayRef<int64_t> scale = op.getScale();
+ SmallVector<int64_t> scale;
+ if (!tosa::getConstShapeValue(op.getScale().getDefiningOp(), scale)) {
+ return failure();
+ }
// Collapse the unit width and height away.
SmallVector<ReassociationExprs, 4> reassociationMap(2);
@@ -1440,105 +1443,6 @@ class ResizeUnaryConverter : public OpRewritePattern<tosa::ResizeOp> {
}
};
-// TOSA resize with width or height of 1 may be broadcasted to a wider
-// dimension. This is done by materializing a new tosa.resize without
-// the broadcasting behavior, and an explicit broadcast afterwards.
-class MaterializeResizeBroadcast : public OpRewritePattern<tosa::ResizeOp> {
-public:
- using OpRewritePattern<tosa::ResizeOp>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(tosa::ResizeOp op,
- PatternRewriter &rewriter) const final {
- Location loc = op.getLoc();
- ImplicitLocOpBuilder builder(loc, rewriter);
- auto input = op.getInput();
- auto inputTy = dyn_cast<RankedTensorType>(input.getType());
- auto resultTy = dyn_cast<RankedTensorType>(op.getType());
-
- if (!inputTy || !resultTy)
- return rewriter.notifyMatchFailure(op,
- "requires ranked input/output types");
-
- auto batch = inputTy.getDimSize(0);
- auto channels = inputTy.getDimSize(3);
- auto inputH = inputTy.getDimSize(1);
- auto inputW = inputTy.getDimSize(2);
- auto outputH = resultTy.getDimSize(1);
- auto outputW = resultTy.getDimSize(2);
-
- if ((inputH != 1 || outputH == 1) && (inputW != 1 || outputW == 1))
- return rewriter.notifyMatchFailure(
- op, "tosa.resize has no broadcasting behavior");
-
- // For any dimension that is broadcastable we generate a width of 1
- // on the output.
- llvm::SmallVector<int64_t> resizeShape;
- resizeShape.push_back(batch);
- resizeShape.push_back(inputH == 1 ? 1 : outputH);
- resizeShape.push_back(inputW == 1 ? 1 : outputW);
- resizeShape.push_back(channels);
-
- auto resizeTy = resultTy.clone(resizeShape);
- auto resize =
- builder.create<tosa::ResizeOp>(resizeTy, input, op->getAttrs());
-
- // Collapse an unit result dims.
- SmallVector<ReassociationExprs, 4> reassociationMap(2);
- reassociationMap[0].push_back(builder.getAffineDimExpr(0));
- reassociationMap.back().push_back(builder.getAffineDimExpr(1));
- if (inputH != 1)
- reassociationMap.push_back({});
- reassociationMap.back().push_back(builder.getAffineDimExpr(2));
- if (inputW != 1)
- reassociationMap.push_back({});
- reassociationMap.back().push_back(builder.getAffineDimExpr(3));
-
- llvm::SmallVector<int64_t> collapseShape = {batch};
- if (inputH != 1)
- collapseShape.push_back(outputH);
- if (inputW != 1)
- collapseShape.push_back(outputW);
- collapseShape.push_back(channels);
-
- auto collapseTy = resultTy.clone(collapseShape);
- Value collapse = builder.create<tensor::CollapseShapeOp>(collapseTy, resize,
- reassociationMap);
-
- // Broadcast the collapsed shape to the output result.
- llvm::SmallVector<Value> outputDynSize;
- if (inputTy.isDynamicDim(0))
- outputDynSize.push_back(builder.create<tensor::DimOp>(input, 0));
- if (inputTy.isDynamicDim(3))
- outputDynSize.push_back(builder.create<tensor::DimOp>(input, 3));
-
- SmallVector<utils::IteratorType> iterators(resultTy.getRank(),
- utils::IteratorType::parallel);
- Value empty = builder.create<tensor::EmptyOp>(
- resultTy.getShape(), resultTy.getElementType(), outputDynSize);
-
- SmallVector<AffineExpr, 4> inputExprs{rewriter.getAffineDimExpr(0)};
- if (inputH != 1)
- inputExprs.push_back(rewriter.getAffineDimExpr(1));
- if (inputW != 1)
- inputExprs.push_back(rewriter.getAffineDimExpr(2));
- inputExprs.push_back(rewriter.getAffineDimExpr(3));
-
- auto inputMap = AffineMap::get(resultTy.getRank(), /*symbolCount=*/0,
- inputExprs, rewriter.getContext());
-
- auto outputMap = rewriter.getMultiDimIdentityMap(resultTy.getRank());
- rewriter.replaceOpWithNewOp<linalg::GenericOp>(
- op, resultTy, ValueRange{collapse}, ValueRange{empty},
- ArrayRef<AffineMap>{inputMap, outputMap}, iterators,
- [=](OpBuilder &b, Location loc, ValueRange args) {
- Value value = args[0];
- b.create<linalg::YieldOp>(loc, value);
- });
-
- return success();
- }
-};
-
class GenericResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
public:
using OpRewritePattern<tosa::ResizeOp>::OpRewritePattern;
@@ -1595,9 +1499,14 @@ class GenericResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
Value inY = b.create<arith::IndexCastOp>(b.getI32Type(), y);
Value inX = b.create<arith::IndexCastOp>(b.getI32Type(), x);
- ArrayRef<int64_t> offset = op.getOffset();
- ArrayRef<int64_t> border = op.getBorder();
- ArrayRef<int64_t> scale = op.getScale();
+ SmallVector<int64_t> scale, offset, border;
+ if (!tosa::getConstShapeValue(op.getScale().getDefiningOp(), scale) ||
+ !tosa::getConstShapeValue(op.getOffset().getDefiningOp(), offset) ||
+ !tosa::getConstShapeValue(op.getBorder().getDefiningOp(), border)) {
+ return rewriter.notifyMatchFailure(
+ op, "tosa.resize scale/offset/border should have compile time "
+ "constant values.");
+ }
Value yScaleN, yScaleD, xScaleN, xScaleD;
yScaleN = b.create<arith::ConstantOp>(b.getI32IntegerAttr(scale[0]));
@@ -2607,8 +2516,6 @@ void mlir::tosa::populateTosaToLinalgConversionPatterns(
/*benefit=*/100);
patterns->add<ResizeUnaryConverter>(patterns->getContext(),
/*benefit=*/200);
- patterns->add<MaterializeResizeBroadcast>(patterns->getContext(),
- /*benefit=*/300);
patterns->add<
// clang-format off
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
index ddfcde6de14f14..b52bb51a1c7cf5 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -955,9 +955,22 @@ OpFoldResult PadOp::fold(FoldAdaptor adaptor) {
// Fold away cases where a tosa.resize operation returns a copy
// of the input image.
OpFoldResult ResizeOp::fold(FoldAdaptor adaptor) {
- ArrayRef<int64_t> offset = getOffset();
- ArrayRef<int64_t> border = getBorder();
- ArrayRef<int64_t> scale = getScale();
+ auto scaleAttr =
+ llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getScale());
+ auto offsetAttr =
+ llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getOffset());
+ auto borderAttr =
+ llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getBorder());
+ if (!scaleAttr || !offsetAttr || !borderAttr) {
+ return {};
+ }
+
+ auto scale = tosa::convertFromIntAttr(scaleAttr, /* rank = */ 4);
+ auto offset = tosa::convertFromIntAttr(offsetAttr, /* rank = */ 2);
+ auto border = tosa::convertFromIntAttr(borderAttr, /* rank = */ 2);
+ if (scale.size() != 4 || offset.size() != 2 || border.size() != 2) {
+ return {};
+ }
// Check unit scaling.
if (scale[0] != scale[1] || scale[2] != scale[3]) {
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index ae4e09a1e324c6..adc9dc0ceff616 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -1451,9 +1451,14 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
(inputWidth == ShapedType::kDynamic))
return failure();
- llvm::ArrayRef<int64_t> scaleInt = adaptor.getScale();
- llvm::ArrayRef<int64_t> offsetInt = adaptor.getOffset();
- llvm::ArrayRef<int64_t> borderInt = adaptor.getBorder();
+ SmallVector<int64_t> scaleInt, offsetInt, borderInt;
+ if (!tosa::getConstShapeValue(adaptor.getScale().getDefiningOp(), scaleInt) ||
+ !tosa::getConstShapeValue(adaptor.getOffset().getDefiningOp(),
+ offsetInt) ||
+ !tosa::getConstShapeValue(adaptor.getBorder().getDefiningOp(),
+ borderInt)) {
+ return failure();
+ }
// Compute the output shape based on attributes: scale, offset, and border.
outputShape[1] =
@@ -1470,6 +1475,81 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
return success();
}
+LogicalResult tosa::ResizeOp::verify() {
+ const Value input = getInput();
+ const Value output = getOutput();
+ const RankedTensorType inputType = llvm::dyn_cast<RankedTensorType>(input.getType());
+ const RankedTensorType outputType = llvm::dyn_cast<RankedTensorType>(output.getType());
+
+ if (!inputType)
+ return emitOpError("expect a ranked input tensor");
+ if (!outputType)
+ return emitOpError("expect a ranked output tensor");
+
+ const int64_t oh = outputType.getDimSize(1);
+ const int64_t ow = outputType.getDimSize(2);
+ const int64_t ih = inputType.getDimSize(1);
+ const int64_t iw = inputType.getDimSize(2);
+
+ SmallVector<int64_t> scaleValues;
+ SmallVector<int64_t> offsetValues;
+ SmallVector<int64_t> borderValues;
+ if (!tosa::getConstShapeValue(getScale().getDefiningOp(), scaleValues) ||
+ !tosa::getConstShapeValue(getOffset().getDefiningOp(), offsetValues) ||
+ !tosa::getConstShapeValue(getBorder().getDefiningOp(), borderValues)) {
+ // Skip following checks if shape is not constant
+ return success();
+ }
+
+ if (llvm::any_of(scaleValues, [](int64_t s) { return s <= 0; }))
+ return emitOpError("expect all scale values to be > 0, got ") << scaleValues;
+
+ const int64_t scaleYN = scaleValues[0];
+ const int64_t scaleYD = scaleValues[1];
+ const int64_t scaleXN = scaleValues[2];
+ const int64_t scaleXD = scaleValues[3];
+
+ const int64_t offsetY = offsetValues[0];
+ const int64_t offsetX = offsetValues[1];
+
+ const int64_t borderY = borderValues[0];
+ const int64_t borderX = borderValues[1];
+
+ auto idivCheck = [](const int64_t lhs, const int64_t rhs) -> std::optional<int64_t> {
+ if (lhs % rhs != 0)
+ return std::nullopt;
+ return lhs / rhs;
+ };
+
+ if (ih != ShapedType::kDynamic) {
+ const std::optional<int64_t> calculatedOutHeightMinusOne = idivCheck(
+ (ih - 1) * scaleYN - offsetY + borderY, scaleYD);
+ if (!calculatedOutHeightMinusOne.has_value())
+ return emitOpError("expected (input_height - 1) * scale_y_n - offset_y + border_y ")
+ << "to be wholly divisible by scale_y_d, got ((" << ih << " - 1) * " << scaleYN
+ << " - " << offsetY << " + " << borderY << ") / " << scaleYD;
+ const int64_t calculatedOutHeight = calculatedOutHeightMinusOne.value() + 1;
+ if (oh != ShapedType::kDynamic && calculatedOutHeight != oh)
+ return emitOpError("calculated output height did not match expected: ")
+ << "calculated=" << calculatedOutHeight << ", expected=" << oh;
+ }
+
+ if (iw != ShapedType::kDynamic) {
+ const int64_t scaledInWidth = (iw - 1) * scaleXN - offsetX + borderX;
+ const std::optional<int64_t> calculatedOutWidthMinusOne = idivCheck(scaledInWidth, scaleXD);
+ if (!calculatedOutWidthMinusOne.has_value())
+ return emitOpError("expected (input_width - 1) * scale_x_n - offset_x + border_x ")
+ << "to be wholly divisible by scale_x_d, got ((" << iw << " - 1) * " << scaleXN
+ << " - " << offsetX << " + " << borderX << ") / " << scaleXD;
+ const int64_t calculatedOutWidth = calculatedOutWidthMinusOne.value() + 1;
+ if (ow != ShapedType::kDynamic && calculatedOutWidth != ow)
+ return emitOpError("calculated output width did not match expected: ")
+ << "calculated=" << calculatedOutWidth << ", expected=" << ow;
+ }
+
+ return success();
+}
+
LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
MLIRContext *context, ::std::optional<Location> location,
ScatterOp::Adaptor adaptor,
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
index a49870687fdc60..5d45835002e383 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -18,6 +18,7 @@
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
+#include "mlir/Dialect/Tosa/Utils/ConversionUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Matchers.h"
@@ -119,6 +120,9 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
// check variable read/write data types against variable declarations
LogicalResult applyVariableCheck(Operation *op);
+ // check error if conditions
+ LogicalResult applyErrorIfCheck(Operation *op);
+
private:
void populateConstantOperandChecks() {
constCheckers.emplace_back(checkConstantOperandPad);
@@ -383,11 +387,14 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
// Resize op: level check max scales
bool levelCheckResize(Operation *op) {
if (auto resize = dyn_cast<tosa::ResizeOp>(op)) {
- auto scale = resize.getScale();
- int16_t scaleYN = scale[0];
- int16_t scaleYD = scale[1];
- int16_t scaleXN = scale[2];
- int16_t scaleXD = scale[3];
+ SmallVector<int64_t> scale;
+ if (!tosa::getConstShapeValue(resize.getScale().getDefiningOp(), scale)) {
+ return false;
+ }
+ const int64_t scaleYN = scale[0];
+ const int64_t scaleYD = scale[1];
+ const int64_t scaleXN = scale[2];
+ const int64_t scaleXD = scale[3];
if (!levelCheckScale(op, scaleYN / scaleYD,
"scale_y_n/scale_y_d <= MAX_SCALE") ||
!levelCheckScale(op, scaleXN / scaleXD,
@@ -519,6 +526,101 @@ LogicalResult TosaValidation::applyVariableCheck(Operation *op) {
return success();
}
+bool checkErrorIfResize(Operation *op) {
+ if (auto resize = dyn_cast<tosa::ResizeOp>(op)) {
+ const Value input = resize.getInput();
+ const Value output = resize.getOutput();
+ const RankedTensorType inputType = llvm::dyn_cast<RankedTensorType>(input.getType());
+ const RankedTensorType outputType = llvm::dyn_cast<RankedTensorType>(output.getType());
+
+ if (!inputType || !outputType) {
+ op->emitOpError("expect ranked input/output tensor");
+ return false;
+ }
+
+ // Ensure the image size is supported by GPU APIs and that for integer
+ // implementations, position * stride does not overflow int32_t.
+ if (inputType.hasStaticShape() && outputType.hasStaticShape()) {
+ const SmallVector<int64_t, 4> sizes = {
+ outputType.getDimSize(1),
+ outputType.getDimSize(2),
+ inputType.getDimSize(1),
+ inputType.getDimSize(2)
+ };
+ const int64_t *maxDim = llvm::max_element(sizes);
+ if (maxDim != sizes.end() && *maxDim >= 16384) {
+ op->emitOpError("expect input/output height/width dims to be < 16384, ") <<
+ "got [OH, OW, IH, IW] = " << sizes;
+ return false;
+ }
+ }
+
+ SmallVector<int64_t> scale;
+ if (!tosa::getConstShapeValue(resize.getScale().getDefiningOp(), scale)) {
+ return false;
+ }
+
+ const int64_t scaleYN = scale[0];
+ const int64_t scaleYD = scale[1];
+ const int64_t scaleXN = scale[2];
+ const int64_t scaleXD = scale[3];
+
+ // Ensure scale values don't overflow int32 accumulator
+ if (scaleYN > (1 << 11) || scaleXN > (1 << 11)) {
+ op->emitOpError("expect all scale numerator values to be <= (1 << 11), got scale_y_n=") << scaleYN
+ << ", scale_x_n=" << scaleXN;
+ return false;
+ }
+
+ if (scaleYD >= 16 * scaleYN || scaleXD >= 16 * scaleXN) {
+ op->emitOpError("expect a downscale ratio larger than 1/16, got y=")
+ << scaleYN << "/" << scaleYD << ", x=" << scaleXN << "/" << scaleXD;
+ return false;
+ }
+
+ SmallVector<int64_t> offset;
+ SmallVector<int64_t> border;
+ if (!tosa::getConstShapeValue(resize.getOffset().getDefiningOp(), offset) ||
+ !tosa::getConstShapeValue(resize.getBorder().getDefiningOp(), border)) {
+ return false;
+ }
+
+ const int64_t offsetY = offset[0];
+ const int64_t offsetX = offset[1];
+ const int64_t borderY = border[0];
+ const int64_t borderX = border[1];
+
+ // Set a consistent lower limit of 1/16 downscale to simplify implementations
+ if (offsetY < -scaleYN || offsetY >= 16 * scaleYN) {
+ op->emitOpError("expect offsetY / scaleYNumerator to be in range [-1, 16), got ")
+ << offsetY << "/" << scaleYN;
+ return false;
+ }
+ if (offsetX < -scaleXN || offsetX >= 16 * scaleXN) {
+ op->emitOpError("expect offsetX / scaleXNumerator to be in range [-1, 16), got ")
+ << offsetX << "/" << scaleXN;
+ return false;
+ }
+ if (borderY < -16 * scaleYN || borderY >= scaleYN) {
+ op->emitOpError("expect borderY / scaleYNumerator to be in range [-16, 1), got ")
+ << borderY << "/" << scaleYN;
+ return false;
+ }
+ if (borderX < -16 * scaleXN || borderX >= scaleXN) {
+ op->emitOpError("expect borderX / scaleXNumerator to be in range [-16, 1), got ")
+ << borderX << "/" << scaleXN;
+ return false;
+ }
+ }
+ return true;
+}
+
+LogicalResult TosaValidation::applyErrorIfCheck(Operation *op) {
+ if (!checkErrorIfResize(op))
+ return failure();
+ return success();
+}
+
bool TosaValidation::isValidElementType(Type type) {
if (isa<FloatType>(type)) {
if (!isEnabledProfile(TosaProfileEnum::MainInference))
@@ -582,6 +684,10 @@ void TosaValidation::runOnOperation() {
// do variable type checks
if (failed(applyVariableCheck(op)))
signalPassFailure();
+
+ // do error if checks
+ if (StrictOperationSpecAlignment && failed(applyErrorIfCheck(op)))
+ signalPassFailure();
});
}
} // namespace
diff --git a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
index 62b0bc1857e395..69d02f9bc37c0c 100644
--- a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
+++ b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
@@ -193,3 +193,21 @@ bool mlir::tosa::getCons...
[truncated]
``````````
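
Not part of the diff above, but as an illustration of what the added verifier rejects: with the same hypothetical input and scale but a zero border, the computed output height is `((16 - 1) * 2 - 0 + 0) / 1 + 1 = 31`, so declaring a `32`-high result would now be diagnosed (`calculated output height did not match expected: calculated=31, expected=32`):

```mlir
%scale  = tosa.const_shape {value = dense<[2, 1, 2, 1]> : tensor<4xindex>} : !tosa.shape<4>
%offset = tosa.const_shape {value = dense<[0, 0]> : tensor<2xindex>} : !tosa.shape<2>
%border = tosa.const_shape {value = dense<[0, 0]> : tensor<2xindex>} : !tosa.shape<2>
// Rejected by the new ResizeOp verifier: computed height 31 != declared height 32.
%out = tosa.resize %in, %scale, %offset, %border {mode = "BILINEAR"}
         : (tensor<1x16x16x8xf32>, !tosa.shape<4>, !tosa.shape<2>, !tosa.shape<2>) -> tensor<1x32x32x8xf32>
```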
</details>
https://github.com/llvm/llvm-project/pull/124956