[Mlir-commits] [mlir] [mlir][tosa] Add error_if verification to pooling operators (PR #130052)
Luke Hutton
llvmlistbot at llvm.org
Thu Mar 6 02:22:55 PST 2025
https://github.com/lhutton1 created https://github.com/llvm/llvm-project/pull/130052
This commit adds the following checks to avg_pool2d and max_pool2d TOSA operations:
- check kernel values are >= 1
- check stride values are >= 1
- check padding values are >= 0
- check padding values are less than kernel sizes
- check output shape matches the expected output shape (see the worked example below)
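
For reference, the output-shape check follows the usual pooling size formula, output = (input + pad_before + pad_after - kernel) / stride + 1, and reports an error when the division is not exact. The sketch below is not part of the patch; the function name and shapes are illustrative, modeled on the new test cases, and show an avg_pool2d that satisfies all of the new checks:

// output_height = (32 + 1 + 1 - 2) / 2 + 1 = 17
// output_width  = (32 + 0 + 0 - 2) / 2 + 1 = 16
func.func @example_avg_pool2d(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x17x16x8xf32> {
  // pad = [top, bottom, left, right]; every value is >= 0 and less than the corresponding kernel size
  %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 2, 2>, pad = array<i64: 1, 1, 0, 0>, stride = array<i64: 2, 2>, acc_type = f32} :
      (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x17x16x8xf32>
  return %0 : tensor<1x17x16x8xf32>
}
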
From 7a602e2e2461349002f19f6f4477d2a98546c8b7 Mon Sep 17 00:00:00 2001
From: Luke Hutton <luke.hutton at arm.com>
Date: Wed, 5 Mar 2025 15:18:38 +0000
Subject: [PATCH] [mlir][tosa] Add error_if verification to pooling operators
This commit adds the following checks to avg_pool2d and max_pool2d
TOSA operations:
- check kernel values are >= 1
- check stride values are >= 1
- check padding values are >= 0
- check padding values are less than kernel sizes
- check output shape matches the expected output shape
Change-Id: I6ef97ba40ef3448b4ddd974990b8c3ce009221c5
Signed-off-by: Luke Hutton <luke.hutton at arm.com>
---
mlir/lib/Dialect/Tosa/IR/TosaOps.cpp | 98 ++++++++++++-
mlir/test/Dialect/Tosa/invalid.mlir | 101 ++++++++++++-
mlir/test/Dialect/Tosa/invalid_extension.mlir | 1 -
mlir/test/Dialect/Tosa/level_check.mlir | 133 +++++-------------
mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir | 18 +--
5 files changed, 239 insertions(+), 112 deletions(-)
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index ea4414fc1890e..39edd16c3e307 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -485,7 +485,95 @@ LogicalResult tosa::ArgMaxOp::verify() {
return success();
}
+template <typename T>
+static LogicalResult verifyPoolingOp(T op) {
+ const llvm::ArrayRef<int64_t> kernel = op.getKernel();
+ if (llvm::any_of(kernel, [](int64_t s) { return s < 1; }))
+ return op.emitOpError("expect all kernel values to be >= 1, got ")
+ << kernel;
+
+ const llvm::ArrayRef<int64_t> strides = op.getStride();
+ if (llvm::any_of(strides, [](int64_t s) { return s < 1; }))
+ return op.emitOpError("expect all stride values to be >= 1, got ")
+ << strides;
+
+ const llvm::ArrayRef<int64_t> padding = op.getPad();
+ if (llvm::any_of(padding, [](int64_t p) { return p < 0; }))
+ return op.emitOpError("expect all padding values to be >= 0, got ")
+ << padding;
+
+ // Padding must be less than kernel size to avoid a divide-by-zero
+ const int64_t kernelX = kernel[1];
+ const int64_t padLeft = padding[2];
+ const int64_t padRight = padding[3];
+ if (padRight >= kernelX || padLeft >= kernelX)
+ return op.emitOpError("expected left/right padding to be less than the "
+ "width of the kernel, got pad_left=")
+ << padLeft << ", pad_right=" << padRight << ", kernel_x=" << kernelX;
+
+ const int64_t kernelY = kernel[0];
+ const int64_t padTop = padding[0];
+ const int64_t padBottom = padding[1];
+ if (padTop >= kernelY || padBottom >= kernelY)
+ return op.emitOpError("expected top/bottom padding to be less than the "
+ "height of the kernel, got pad_top=")
+ << padTop << ", pad_bottom=" << padBottom
+ << ", kernel_y=" << kernelY;
+
+ const auto inputType =
+ llvm::dyn_cast<RankedTensorType>(op.getInput().getType());
+ const auto outputType =
+ llvm::dyn_cast<RankedTensorType>(op.getResult().getType());
+ if (!inputType || !outputType)
+ return success();
+
+ const auto verifyOutputSize =
+ [&op](const int64_t inputSize, const int64_t outputSize,
+ const int64_t kernelSize, const int64_t strideSize,
+ const int64_t padBefore, const int64_t padAfter,
+ const llvm::StringRef dimName, const llvm::StringRef dimAxis,
+ const llvm::StringRef padBeforeName,
+ const llvm::StringRef padAfterName) -> LogicalResult {
+ if (ShapedType::isDynamic(inputSize))
+ return success();
+
+ const std::optional<int64_t> calculatedOutSizeMinusOne =
+ idivCheck(inputSize + padBefore + padAfter - kernelSize, strideSize);
+ if (!calculatedOutSizeMinusOne.has_value())
+ return op.emitOpError("expected input_")
+ << dimName << " + pad_" << padBeforeName << " + pad_"
+ << padAfterName << " - kernel_" << dimAxis
+ << " to be wholly divisible by stride_" << dimAxis << ", got ("
+ << inputSize << " + " << padBefore << " + " << padAfter << " - "
+ << kernelSize << ") / " << strideSize;
+
+ const int64_t calculatedOutSize = calculatedOutSizeMinusOne.value() + 1;
+ if (!ShapedType::isDynamic(outputSize) && calculatedOutSize != outputSize)
+ return op.emitOpError("calculated output ")
+ << dimName << " did not match expected: "
+ << "calculated=" << calculatedOutSize
+ << ", expected=" << outputSize;
+
+ return success();
+ };
+
+ if (failed(verifyOutputSize(inputType.getDimSize(1), outputType.getDimSize(1),
+ kernel[0], strides[0], padding[0], padding[1],
+ "height", "y", "top", "bottom")))
+ return failure();
+
+ if (failed(verifyOutputSize(inputType.getDimSize(2), outputType.getDimSize(2),
+ kernel[1], strides[1], padding[2], padding[3],
+ "width", "x", "left", "right")))
+ return failure();
+
+ return success();
+}
+
LogicalResult tosa::AvgPool2dOp::verify() {
+ if (failed(verifyPoolingOp(*this)))
+ return failure();
+
const Type inputETy = getStorageElementTypeOrSelf(getInput().getType());
const Type resultETy = getStorageElementTypeOrSelf(getOutput().getType());
const Type inputZpETy = getStorageElementTypeOrSelf(getInputZp().getType());
@@ -2524,8 +2612,14 @@ LogicalResult MaxPool2dOp::inferReturnTypeComponents(
}
LogicalResult MaxPool2dOp::verify() {
- return verifySameElementTypes(*this, /* intype = */ getInput().getType(),
- /* outType = */ getOutput().getType());
+ if (failed(verifySameElementTypes(*this, /* intype = */ getInput().getType(),
+ /* outType = */ getOutput().getType())))
+ return failure();
+
+ if (failed(verifyPoolingOp(*this)))
+ return failure();
+
+ return success();
}
LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
index 9a5f4612db42d..ffca94e451668 100644
--- a/mlir/test/Dialect/Tosa/invalid.mlir
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -287,7 +287,7 @@ func.func @test_pad_invalid_padConst_rank(%arg0: tensor<13x21xf32>, %arg1: tenso
// -----
func.func @test_pad_padding_shape_mismatch(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
- %0 = tosa.const_shape {value = dense<1> : tensor<4xindex>} : () -> !tosa.shape<4>
+ %0 = tosa.const_shape {value = dense<1> : tensor<4xindex>} : () -> !tosa.shape<4>
%pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
// expected-error at +1 {{'tosa.pad' op expected padding tensor dim 0 to have size 6 (2*rank(shape1)) but got size 4}}
%1 = tosa.pad %arg0, %0, %pad_const : (tensor<13x21x3xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<13x21x3xf32>
@@ -1431,3 +1431,102 @@ func.func @test_argmax_invalid_output_shape(%arg0: tensor<1x2x3xf32>) -> tensor<
%0 = tosa.argmax %arg0 {axis = 0 : i32}: (tensor<1x2x3xf32>) -> tensor<1x2x3xi32>
return %0 : tensor<1x2x3xi32>
}
+
+// -----
+
+func.func @test_avgpool2d_invalid_kernel(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+ // expected-error at +1 {{'tosa.avg_pool2d' op expect all kernel values to be >= 1, got 0, -1}}
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 0, -1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, acc_type = f32} :
+ (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
+ return %0 : tensor<1x32x32x8xf32>
+}
+
+// -----
+
+func.func @test_avgpool2d_invalid_stride(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+ // expected-error at +1 {{'tosa.avg_pool2d' op expect all stride values to be >= 1, got 1, 0}}
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 0>, acc_type = f32} :
+ (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
+ return %0 : tensor<1x32x32x8xf32>
+}
+
+// -----
+
+func.func @test_avgpool2d_invalid_padding(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+ // expected-error at +1 {{'tosa.avg_pool2d' op expect all padding values to be >= 0, got 0, 0, 0, -1}}
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, -1>, stride = array<i64: 1, 1>, acc_type = f32} :
+ (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
+ return %0 : tensor<1x32x32x8xf32>
+}
+
+// -----
+
+func.func @test_avgpool2d_padding_not_less_than_kernel_x(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+ // expected-error at +1 {{'tosa.avg_pool2d' op expected left/right padding to be less than the width of the kernel, got pad_left=0, pad_right=1, kernel_x=1}}
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 1>, stride = array<i64: 1, 1>, acc_type = f32} :
+ (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
+ return %0 : tensor<1x32x32x8xf32>
+}
+
+// -----
+
+func.func @test_avgpool2d_padding_not_less_than_kernel_y(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+ // expected-error at +1 {{'tosa.avg_pool2d' op expected top/bottom padding to be less than the height of the kernel, got pad_top=2, pad_bottom=0, kernel_y=1}}
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 2, 0, 0, 0>, stride = array<i64: 1, 1>, acc_type = f32} :
+ (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
+ return %0 : tensor<1x32x32x8xf32>
+}
+
+// -----
+
+func.func @test_avgpool2d_wholly_divisible_height(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+ // expected-error at +1 {{'tosa.avg_pool2d' op expected input_height + pad_top + pad_bottom - kernel_y to be wholly divisible by stride_y, got (32 + 0 + 0 - 1) / 2}}
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 1>, acc_type = f32} :
+ (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
+ return %0 : tensor<1x32x32x8xf32>
+}
+
+// -----
+
+func.func @test_avgpool2d_wholly_divisible_width(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+ // expected-error at +1 {{'tosa.avg_pool2d' op expected input_width + pad_left + pad_right - kernel_x to be wholly divisible by stride_x, got (32 + 0 + 0 - 1) / 2}}
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 2>, acc_type = f32} :
+ (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
+ return %0 : tensor<1x32x32x8xf32>
+}
+
+// -----
+
+func.func @test_avgpool2d_unexpected_output_height(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x33x32x8xf32> {
+ // expected-error at +1 {{'tosa.avg_pool2d' op calculated output height did not match expected: calculated=32, expected=33}}
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, acc_type = f32} :
+ (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x33x32x8xf32>
+ return %0 : tensor<1x33x32x8xf32>
+}
+
+// -----
+
+func.func @test_avgpool2d_unexpected_output_width(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x?x33x8xf32> {
+ // expected-error at +1 {{'tosa.avg_pool2d' op calculated output width did not match expected: calculated=32, expected=33}}
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, acc_type = f32} :
+ (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x?x33x8xf32>
+ return %0 : tensor<1x?x33x8xf32>
+}
+
+// -----
+
+func.func @test_maxpool2d_invalid_kernel(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x2x32x8xf32> {
+ // expected-error at +1 {{'tosa.max_pool2d' op expect all kernel values to be >= 1, got 0, 1}}
+ %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 0, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} :
+ (tensor<1x32x32x8xf32>) -> tensor<1x2x32x8xf32>
+ return %0 : tensor<1x2x32x8xf32>
+}
+
+// -----
+
+func.func @test_maxpool2d_unexpected_output_width(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x2x8xf32> {
+ // expected-error at +1 {{'tosa.max_pool2d' op calculated output width did not match expected: calculated=32, expected=2}}
+ %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} :
+ (tensor<1x32x32x8xf32>) -> tensor<1x32x2x8xf32>
+ return %0 : tensor<1x32x2x8xf32>
+}
diff --git a/mlir/test/Dialect/Tosa/invalid_extension.mlir b/mlir/test/Dialect/Tosa/invalid_extension.mlir
index 684875f231dec..c192609e382f5 100644
--- a/mlir/test/Dialect/Tosa/invalid_extension.mlir
+++ b/mlir/test/Dialect/Tosa/invalid_extension.mlir
@@ -69,4 +69,3 @@ func.func @test_while_loop(%arg0: tensor<10xi32>, %arg1: tensor<i32>) {
}
return
}
-
diff --git a/mlir/test/Dialect/Tosa/level_check.mlir b/mlir/test/Dialect/Tosa/level_check.mlir
index a83fad1035a6d..02c807c392ae6 100644
--- a/mlir/test/Dialect/Tosa/level_check.mlir
+++ b/mlir/test/Dialect/Tosa/level_check.mlir
@@ -507,75 +507,38 @@ func.func @test_identity_rank_valid(%arg0: tensor<i32>) -> tensor<i32> {
// -----
-func.func @test_avgpool2d_kernel_y(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+func.func @test_avgpool2d_kernel_y(%arg0: tensor<1x8194x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x2x32x8xf32> {
// expected-error at +1 {{'tosa.avg_pool2d' op failed level check: kernel <= MAX_KERNEL}}
- %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 8193, 1>, pad = array<i64: 4, 4, 4, 4>, stride = array<i64: 1, 1>, acc_type = f32} :
- (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 8193, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, acc_type = f32} :
+ (tensor<1x8194x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x2x32x8xf32>
+ return %0 : tensor<1x2x32x8xf32>
}
// -----
-func.func @test_avgpool2d_kernel_x(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+func.func @test_avgpool2d_kernel_x(%arg0: tensor<1x32x8194x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x2x8xf32> {
// expected-error at +1 {{'tosa.avg_pool2d' op failed level check: kernel <= MAX_KERNEL}}
- %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 8193>, pad = array<i64: 4, 4, 4, 4>, stride = array<i64: 1, 1>, acc_type = f32} :
- (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 8193>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, acc_type = f32} :
+ (tensor<1x32x8194x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x2x8xf32>
+ return %0 : tensor<1x32x2x8xf32>
}
// -----
-func.func @test_avgpool2d_stride_y(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+func.func @test_avgpool2d_stride_y(%arg0: tensor<1x8194x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x2x32x8xf32> {
// expected-error at +1 {{'tosa.avg_pool2d' op failed level check: stride <= MAX_STRIDE}}
- %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 4, 4, 4>, stride = array<i64: 8193, 1>, acc_type = f32} :
- (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 8193, 1>, acc_type = f32} :
+ (tensor<1x8194x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x2x32x8xf32>
+ return %0 : tensor<1x2x32x8xf32>
}
// -----
-func.func @test_avgpool2d_stride_x(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
+func.func @test_avgpool2d_stride_x(%arg0: tensor<1x32x8194x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x2x8xf32> {
// expected-error at +1 {{'tosa.avg_pool2d' op failed level check: stride <= MAX_STRIDE}}
- %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 4, 4, 4>, stride = array<i64: 1, 8193>, acc_type = f32} :
- (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
-}
-
-
-// -----
-
-func.func @test_avgpool2d_pad_top(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
- // expected-error at +1 {{'tosa.avg_pool2d' op failed level check: pad <= MAX_KERNEL}}
- %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 8193, 4, 4, 4>, stride = array<i64: 1, 1>, acc_type = f32} :
- (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
-}
-
-// -----
-
-func.func @test_avgpool2d_pad_bottom(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
- // expected-error at +1 {{'tosa.avg_pool2d' op failed level check: pad <= MAX_KERNEL}}
- %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 8193, 4, 4>, stride = array<i64: 1, 1>, acc_type = f32} :
- (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
-}
-
-// -----
-
-func.func @test_avgpool2d_pad_left(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
- // expected-error at +1 {{'tosa.avg_pool2d' op failed level check: pad <= MAX_KERNEL}}
- %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 4, 8193, 4>, stride = array<i64: 1, 1>, acc_type = f32} :
- (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
-}
-
-// -----
-
-func.func @test_avgpool2d_pad_right(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>) -> tensor<1x32x32x8xf32> {
- // expected-error at +1 {{'tosa.avg_pool2d' op failed level check: pad <= MAX_KERNEL}}
- %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 4, 4, 8193>, stride = array<i64: 1, 1>, acc_type = f32} :
- (tensor<1x32x32x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
+ %0 = "tosa.avg_pool2d"(%arg0, %arg1, %arg2) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 8193>, acc_type = f32} :
+ (tensor<1x32x8194x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x2x8xf32>
+ return %0 : tensor<1x32x2x8xf32>
}
// -----
@@ -868,66 +831,38 @@ func.func @test_fft2d_imag_w(%arg0: tensor<32x32x16384xf32>, %arg1: tensor<32x32
// -----
-func.func @test_maxpool2d_stride_y(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
- // expected-error at +1 {{'tosa.max_pool2d' op failed level check: stride <= MAX_STRIDE}}
- %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 4, 4, 4>, stride = array<i64: 8193, 1>} :
- (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
+func.func @test_maxpool2d_kernel_y(%arg0: tensor<1x8194x32x8xf32>) -> tensor<1x2x32x8xf32> {
+ // expected-error at +1 {{'tosa.max_pool2d' op failed level check: kernel <= MAX_KERNEL}}
+ %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 8193, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} :
+ (tensor<1x8194x32x8xf32>) -> tensor<1x2x32x8xf32>
+ return %0 : tensor<1x2x32x8xf32>
}
// -----
-func.func @test_maxpool2d_kernel_x(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
+func.func @test_maxpool2d_kernel_x(%arg0: tensor<1x32x8194x8xf32>) -> tensor<1x32x2x8xf32> {
// expected-error at +1 {{'tosa.max_pool2d' op failed level check: kernel <= MAX_KERNEL}}
- %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 8193>, pad = array<i64: 4, 4, 4, 4>, stride = array<i64: 1, 1>} :
- (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
+ %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 8193>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} :
+ (tensor<1x32x8194x8xf32>) -> tensor<1x32x2x8xf32>
+ return %0 : tensor<1x32x2x8xf32>
}
// -----
-func.func @test_maxpool2d_stride_x(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
+func.func @test_maxpool2d_stride_y(%arg0: tensor<1x8194x32x8xf32>) -> tensor<1x2x32x8xf32> {
// expected-error at +1 {{'tosa.max_pool2d' op failed level check: stride <= MAX_STRIDE}}
- %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 4, 4, 4>, stride = array<i64: 1, 8193>} :
- (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
-}
-
-
-// -----
-
-func.func @test_maxpool2d_pad_top(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
- // expected-error at +1 {{'tosa.max_pool2d' op failed level check: pad <= MAX_KERNEL}}
- %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = array<i64: 8193, 4, 4, 4>, stride = array<i64: 1, 1>} :
- (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
-}
-
-// -----
-
-func.func @test_maxpool2d_pad_bottom(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
- // expected-error at +1 {{'tosa.max_pool2d' op failed level check: pad <= MAX_KERNEL}}
- %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 8193, 4, 4>, stride = array<i64: 1, 1>} :
- (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
-}
-
-// -----
-
-func.func @test_maxpool2d_pad_left(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
- // expected-error at +1 {{'tosa.max_pool2d' op failed level check: pad <= MAX_KERNEL}}
- %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 4, 8193, 4>, stride = array<i64: 1, 1>} :
- (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
+ %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 8193, 1>} :
+ (tensor<1x8194x32x8xf32>) -> tensor<1x2x32x8xf32>
+ return %0 : tensor<1x2x32x8xf32>
}
// -----
-func.func @test_maxpool2d_pad_right(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
- // expected-error at +1 {{'tosa.max_pool2d' op failed level check: pad <= MAX_KERNEL}}
- %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = array<i64: 4, 4, 4, 8193>, stride = array<i64: 1, 1>} :
- (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
- return %0 : tensor<1x32x32x8xf32>
+func.func @test_maxpool2d_stride_x(%arg0: tensor<1x32x8194x8xf32>) -> tensor<1x32x2x8xf32> {
+ // expected-error at +1 {{'tosa.max_pool2d' op failed level check: stride <= MAX_STRIDE}}
+ %0 = "tosa.max_pool2d"(%arg0) {kernel = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 8193>} :
+ (tensor<1x32x8194x8xf32>) -> tensor<1x32x2x8xf32>
+ return %0 : tensor<1x32x2x8xf32>
}
// -----
diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
index 80e55912e131f..2c75bcd4dd13b 100644
--- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
@@ -722,11 +722,11 @@ func.func @test_pool_padded(%arg0: tensor<3x5x6x7xf32>) {
%input_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
%output_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
- // CHECK: -> tensor<3x5x11x7xf32>
- %0 = tosa.avg_pool2d %arg0, %input_zp, %output_zp {acc_type = f32, kernel = array<i64: 4, 3>, pad = array<i64: 1, 2, 3, 4>, stride = array<i64: 1, 1>} : (tensor<3x5x6x7xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<?x?x?x?xf32>
+ // CHECK: -> tensor<3x7x5x7xf32>
+ %0 = tosa.avg_pool2d %arg0, %input_zp, %output_zp {acc_type = f32, kernel = array<i64: 4, 3>, pad = array<i64: 3, 2, 1, 0>, stride = array<i64: 1, 1>} : (tensor<3x5x6x7xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<?x?x?x?xf32>
- // CHECK: -> tensor<3x5x11x7xf32>
- %1 = tosa.max_pool2d %arg0 {kernel = array<i64: 4, 3>, pad = array<i64: 1, 2, 3, 4>, stride = array<i64: 1, 1>} : (tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32>
+ // CHECK: -> tensor<3x7x5x7xf32>
+ %1 = tosa.max_pool2d %arg0 {kernel = array<i64: 4, 3>, pad = array<i64: 3, 2, 1, 0>, stride = array<i64: 1, 1>} : (tensor<3x5x6x7xf32>) -> tensor<?x?x?x?xf32>
return
}
@@ -751,15 +751,15 @@ func.func @conv2d_dynamic_bias(%input: tensor<2x8x9x3xf32>, %weights: tensor<5x3
// -----
// CHECK-LABEL: @test_pool_stride
-func.func @test_pool_stride(%arg0: tensor<3x11x12x7xf32>) {
+func.func @test_pool_stride(%arg0: tensor<3x14x12x7xf32>) {
%input_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
%output_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
- // CHECK: -> tensor<3x4x4x7xf32>
- %0 = tosa.avg_pool2d %arg0, %input_zp, %output_zp {acc_type = f32, kernel = array<i64: 4, 3>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 3>} : (tensor<3x11x12x7xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<?x?x?x?xf32>
+ // CHECK: -> tensor<3x6x4x7xf32>
+ %0 = tosa.avg_pool2d %arg0, %input_zp, %output_zp {acc_type = f32, kernel = array<i64: 4, 3>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 3>} : (tensor<3x14x12x7xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<?x?x?x?xf32>
- // CHECK: -> tensor<3x4x4x7xf32>
- %1 = tosa.max_pool2d %arg0 {kernel = array<i64: 4, 3>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 3>} : (tensor<3x11x12x7xf32>) -> tensor<?x?x?x?xf32>
+ // CHECK: -> tensor<3x6x4x7xf32>
+ %1 = tosa.max_pool2d %arg0 {kernel = array<i64: 4, 3>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 3>} : (tensor<3x14x12x7xf32>) -> tensor<?x?x?x?xf32>
return
}