[Mlir-commits] [mlir] [MLIR][TOSA] Add ERROR_IF checks to TRANSPOSE_CONV2D verifier (PR #133234)

Luke Hutton llvmlistbot at llvm.org
Fri Mar 28 03:08:46 PDT 2025


================
@@ -2896,6 +2896,103 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
 LogicalResult TransposeConv2DOp::verify() {
   if (verifyConvOp(*this).failed() || verifyConvOpModes(*this).failed())
     return failure();
+
+  const RankedTensorType weightType =
+      llvm::dyn_cast<RankedTensorType>(getWeight().getType());
+  if (!weightType)
+    return success();
+
+  const int64_t kernelHeight = weightType.getDimSize(1);
+  const int64_t kernelWidth = weightType.getDimSize(2);
+
+  // Skip further checks if kernel dimensions are dynamic
+  if (kernelHeight == ShapedType::kDynamic ||
+      kernelWidth == ShapedType::kDynamic)
+    return success();
+
+  llvm::ArrayRef<int64_t> padding = getOutPad();
+  const int64_t outPadTop = padding[0];
+  const int64_t outPadBottom = padding[1];
+  const int64_t outPadLeft = padding[2];
+  const int64_t outPadRight = padding[3];
+
+  if (outPadTop <= -kernelHeight)
+    return emitOpError("Expected out_pad_top > -KH, but got: out_pad_top=")
+           << outPadTop << " and KH=" << kernelHeight;
+  if (outPadBottom <= -kernelHeight)
+    return emitOpError(
+               "Expected out_pad_bottom > -KH, but got: out_pad_bottom=")
+           << outPadBottom << " and KH=" << kernelHeight;
+  if (outPadLeft <= -kernelWidth)
+    return emitOpError("Expected out_pad_left > -KW, but got: out_pad_left=")
+           << outPadLeft << " and KW=" << kernelWidth;
+  if (outPadRight <= -kernelWidth)
+    return emitOpError("Expected out_pad_right > -KW, but got: out_pad_right=")
+           << outPadRight << " and KW=" << kernelWidth;
+
+  llvm::ArrayRef<int64_t> strides = getStride();
+  const int64_t strideY = strides[0];
+  const int64_t strideX = strides[1];
+
+  if (strideY < 1 || strideX < 1)
+    return emitOpError("expect all stride values to be >= 1, got [")
+           << strides << "]";
+
+  const RankedTensorType inputType =
+      llvm::dyn_cast<RankedTensorType>(getInput().getType());
+
+  const RankedTensorType outputType =
+      llvm::dyn_cast<RankedTensorType>(getOutput().getType());
+
+  if (!inputType || !outputType)
+    return success();
+
+  const int64_t inputHeight = inputType.getDimSize(1);
+  const int64_t inputWidth = inputType.getDimSize(2);
+  const int64_t outputHeight = outputType.getDimSize(1);
+  const int64_t outputWidth = outputType.getDimSize(2);
+
+  // Skip further checks if the input or output dimensions are dynamic
+  if (inputHeight == ShapedType::kDynamic ||
+      inputWidth == ShapedType::kDynamic ||
+      outputHeight == ShapedType::kDynamic ||
+      outputWidth == ShapedType::kDynamic)
+    return success();
+
+  if (outputHeight !=
+      (inputHeight - 1) * strideY + outPadTop + outPadBottom + kernelHeight)
+    return emitOpError("dimension mismatch: expected OH == (IH - 1) * stride_y "
+                       "+ out_pad_top + out_pad_bottom + KH, but got ")
+           << outputHeight << " != (" << inputHeight << " - 1) * " << strideY
+           << " + " << outPadTop << " + " << outPadBottom << " + "
+           << kernelHeight;
+
+  if (outputWidth !=
+      (inputWidth - 1) * strideX + outPadLeft + outPadRight + kernelWidth)
+    return emitOpError("dimension mismatch: expected OW == (IW - 1) * stride_x "
+                       "+ out_pad_left + out_pad_right + KW, but got ")
+           << outputWidth << " != (" << inputWidth << " - 1) * " << strideX
+           << " + " << outPadLeft << " + " << outPadRight << " + "
+           << kernelWidth;
+
+  const RankedTensorType biasType =
+      llvm::dyn_cast<RankedTensorType>(getBias().getType());
+
+  if (!biasType)
+    return success();
+
+  const int64_t biasChannels = biasType.getDimSize(0);
+
+  // Skip further checks if bias is dynamic
+  if (biasChannels == ShapedType::kDynamic)
+    return success();
+
+  const int64_t outputChannels = outputType.getDimSize(3);
+  if (biasChannels != outputChannels && biasChannels != 1)
+    return emitOpError(
+               "bias channels expected to be equal to output channels (")
+           << outputChannels << ") or 1, got " << biasChannels;
----------------
lhutton1 wrote:

It would be nice if we could run this check on the bias even when the kernel or input height and width are not static; because of the early exits above, we currently miss that opportunity. Something along the lines of the sketch below could work.
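
A rough, untested sketch (reusing the names from the diff above): hoist the bias check ahead of the dynamic-shape early exits and guard it only on the dimensions it actually needs, so dynamic kernel/input extents no longer skip it.

```cpp
// Sketch: perform the bias-channel check independently of the kernel/input
// static-shape early exits. Only the bias and output types are required.
const RankedTensorType biasType =
    llvm::dyn_cast<RankedTensorType>(getBias().getType());
const RankedTensorType outputType =
    llvm::dyn_cast<RankedTensorType>(getOutput().getType());
if (biasType && outputType) {
  const int64_t biasChannels = biasType.getDimSize(0);
  const int64_t outputChannels = outputType.getDimSize(3);
  // Skip only if either channel count is dynamic.
  if (biasChannels != ShapedType::kDynamic &&
      outputChannels != ShapedType::kDynamic &&
      biasChannels != outputChannels && biasChannels != 1)
    return emitOpError(
               "bias channels expected to be equal to output channels (")
           << outputChannels << ") or 1, got " << biasChannels;
}
```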

https://github.com/llvm/llvm-project/pull/133234

