[Mlir-commits] [mlir] [MLIR][TOSA] Add ERROR_IF checks to TRANSPOSE_CONV2D verifier (PR #133234)

Luke Hutton llvmlistbot at llvm.org
Fri Mar 28 03:08:47 PDT 2025


================
@@ -2896,6 +2896,103 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
 LogicalResult TransposeConv2DOp::verify() {
   if (verifyConvOp(*this).failed() || verifyConvOpModes(*this).failed())
     return failure();
+
+  const RankedTensorType weightType =
+      llvm::dyn_cast<RankedTensorType>(getWeight().getType());
+  if (!weightType)
+    return success();
+
+  const int64_t kernelHeight = weightType.getDimSize(1);
+  const int64_t kernelWidth = weightType.getDimSize(2);
+
+  // Skip further checks if kernel dimensions are dynamic
+  if (kernelHeight == ShapedType::kDynamic ||
+      kernelWidth == ShapedType::kDynamic)
+    return success();
+
+  llvm::ArrayRef<int64_t> padding = getOutPad();
+  const int64_t outPadTop = padding[0];
+  const int64_t outPadBottom = padding[1];
+  const int64_t outPadLeft = padding[2];
+  const int64_t outPadRight = padding[3];
+
+  if (outPadTop <= -kernelHeight)
+    return emitOpError("Expected out_pad_top > -KH, but got: out_pad_top=")
+           << outPadTop << " and KH=" << kernelHeight;
+  if (outPadBottom <= -kernelHeight)
+    return emitOpError(
+               "Expected out_pad_bottom > -KH, but got: out_pad_bottom=")
+           << outPadBottom << " and KH=" << kernelHeight;
+  if (outPadLeft <= -kernelWidth)
+    return emitOpError("Expected out_pad_left > -KW, but got: out_pad_left=")
+           << outPadLeft << " and KW=" << kernelWidth;
+  if (outPadRight <= -kernelWidth)
+    return emitOpError("Expected out_pad_right > -KW, but got: out_pad_right=")
+           << outPadRight << " and KW=" << kernelWidth;
+
+  llvm::ArrayRef<int64_t> strides = getStride();
+  const int64_t strideY = strides[0];
+  const int64_t strideX = strides[1];
+
+  if (strideY < 1 || strideX < 1)
+    return emitOpError("expect all stride values to be >= 1, got [")
+           << strides << "]";
+
+  const RankedTensorType inputType =
+      llvm::dyn_cast<RankedTensorType>(getInput().getType());
+
+  const RankedTensorType outputType =
+      llvm::dyn_cast<RankedTensorType>(getOutput().getType());
+
+  if (!inputType || !outputType)
+    return success();
+
+  const int64_t inputHeight = inputType.getDimSize(1);
+  const int64_t inputWidth = inputType.getDimSize(2);
+  const int64_t outputHeight = outputType.getDimSize(1);
+  const int64_t outputWidth = outputType.getDimSize(2);
+
+  // Skip further checks if the input or output dimensions are dynamic
+  if (inputHeight == ShapedType::kDynamic ||
+      inputWidth == ShapedType::kDynamic ||
+      outputHeight == ShapedType::kDynamic ||
+      outputWidth == ShapedType::kDynamic)
+    return success();
+
+  if (outputHeight !=
+      (inputHeight - 1) * strideY + outPadTop + outPadBottom + kernelHeight)
+    return emitOpError("dimension mismatch: expected OH == (IH - 1) * stride_y "
+                       "+ out_pad_top + out_pad_bottom + KH, but got ")
+           << outputHeight << " != (" << inputHeight << " - 1) * " << strideY
+           << " + " << outPadTop << " + " << outPadBottom << " + "
+           << kernelHeight;
+
+  if (outputWidth !=
----------------
lhutton1 wrote:

This check doesn't run if the input/output height is dynamic but the width is not (and vice versa).
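
For example, gating each dimension's check on only the dimensions it actually uses would let the width check still run when only the height is dynamic. A rough sketch of one possible restructuring, not necessarily what the PR adopts; the width formula here is inferred by symmetry with the height check shown above:

    // Check the height relationship only when IH and OH are both static.
    if (inputHeight != ShapedType::kDynamic &&
        outputHeight != ShapedType::kDynamic &&
        outputHeight != (inputHeight - 1) * strideY + outPadTop +
                            outPadBottom + kernelHeight)
      return emitOpError("dimension mismatch: expected OH == (IH - 1) * "
                         "stride_y + out_pad_top + out_pad_bottom + KH");

    // Check the width relationship independently, using only IW and OW,
    // so a dynamic height no longer skips it.
    if (inputWidth != ShapedType::kDynamic &&
        outputWidth != ShapedType::kDynamic &&
        outputWidth != (inputWidth - 1) * strideX + outPadLeft +
                           outPadRight + kernelWidth)
      return emitOpError("dimension mismatch: expected OW == (IW - 1) * "
                         "stride_x + out_pad_left + out_pad_right + KW");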

https://github.com/llvm/llvm-project/pull/133234
