[Mlir-commits] [mlir] [mlir][tosa] Fold PadOp to tensor operations (PR #132700)
Georgios Pinitas
llvmlistbot at llvm.org
Tue Apr 8 06:57:15 PDT 2025
================
@@ -39,6 +39,273 @@ using namespace mlir::tosa;
// Operator Canonicalizers.
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// Tensor Data Engine Operators.
+//===----------------------------------------------------------------------===//
+
+// Check that the pad constant and the zero point of the tensor operation are aligned.
+bool checkMatchingPadConstAndZp(Value padConst, Value zp) {
+ // Check that padConst is a constant value and a scalar tensor
+ DenseElementsAttr padConstAttr;
+ if (!matchPattern(padConst, m_Constant(&padConstAttr)) ||
+ (padConstAttr.size() != 1)) {
+ return false;
+ }
+
+ // Check that floating point pad is zero
+ if (auto padConstFpAttr = mlir::dyn_cast<DenseFPElementsAttr>(padConstAttr)) {
+ float padConstVal = (*padConstFpAttr.begin()).convertToFloat();
+ return padConstVal == 0.0f;
+ }
+
+ // Check that the zp and padConst align for the integer (quantized) case
+ if (auto padConstIntAttr =
+ mlir::dyn_cast<DenseIntElementsAttr>(padConstAttr)) {
+ DenseIntElementsAttr zpAttr;
+ // Check that zp is a constant value and a scalar tensor
+ if (!matchPattern(zp, m_Constant(&zpAttr)) || (zpAttr.size() != 1)) {
+ return false;
+ }
+
+ // Check equality
+ int64_t zpVal = (*zpAttr.begin()).getSExtValue();
+ int64_t padConstVal = (*padConstIntAttr.begin()).getSExtValue();
+ return zpVal == padConstVal;
+ }
+
+ // Bail out on unsupported element types
+ return false;
+}
+
+namespace {
+template <typename OpTy>
+struct PoolPadFoldAdaptor;
+
+template <>
+struct PoolPadFoldAdaptor<tosa::AvgPool2dOp> {
+ using OpTy = tosa::AvgPool2dOp;
+ static bool checkKernelCompliance(OpTy op, const ArrayRef<int64_t> newPad) {
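+ // newPad is [top, bottom, left, right] and kernel is [height, width];
+ // reject the fold if the merged padding reaches the kernel size in
+ // either spatial dimension.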
+ const llvm::ArrayRef<int64_t> kernel = op.getKernel();
+ if (newPad[2] >= kernel[1] || newPad[3] >= kernel[1] ||
+ newPad[0] >= kernel[0] || newPad[1] >= kernel[0])
+ return false;
+ return true;
+ }
+ static bool checkPadConstCompliance(OpTy op, Value padConst) {
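+ // The explicit pad value must match the pool's input zero point (or be
+ // 0.0 for floats); delegate to the shared helper above.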
+ return checkMatchingPadConstAndZp(padConst, op.getInputZp());
+ }
+ static void replaceOpWithNewPad(PatternRewriter &rewriter, OpTy op,
+ Value padInput, ArrayRef<int64_t> newPad) {
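+ // Recreate the pool over the pad's input, carrying the merged padding
+ // in the pool's own pad attribute.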
+ rewriter.replaceOpWithNewOp<tosa::AvgPool2dOp>(
+ op, op.getType(), padInput, op.getInputZp(), op.getOutputZp(),
+ op.getKernel(), op.getStride(), rewriter.getDenseI64ArrayAttr(newPad),
+ op.getAccType());
+ }
+};
+
+template <>
+struct PoolPadFoldAdaptor<tosa::MaxPool2dOp> {
+ using OpTy = tosa::MaxPool2dOp;
+ static bool checkKernelCompliance(OpTy op, const ArrayRef<int64_t> newPad) {
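+ // Same constraint as for avg_pool2d: the merged padding must stay
+ // strictly smaller than the kernel extent in each spatial dimension.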
+ const llvm::ArrayRef<int64_t> kernel = op.getKernel();
+ if (newPad[2] >= kernel[1] || newPad[3] >= kernel[1] ||
+ newPad[0] >= kernel[0] || newPad[1] >= kernel[0])
+ return false;
+ return true;
+ }
+ static bool checkPadConstCompliance(OpTy, Value padConst) {
+ // Check that padConst is a constant value and a scalar tensor
+ DenseElementsAttr padConstAttr;
+ if (!matchPattern(padConst, m_Constant(&padConstAttr)) ||
+ padConstAttr.size() != 1) {
+ return false;
+ }
+
+ // The pad constant needs to be the minimum value of the element type
+ // for the padding to fold into the max pool.
+ if (auto padConstFpAttr =
+ mlir::dyn_cast<DenseFPElementsAttr>(padConstAttr)) {
+ float padConstVal = (*padConstFpAttr.begin()).convertToFloat();
+ return padConstVal == std::numeric_limits<float>::lowest();
+ } else if (auto padConstIntAttr =
+ mlir::dyn_cast<DenseIntElementsAttr>(padConstAttr)) {
+ int64_t padConstVal = (*padConstIntAttr.begin()).getSExtValue();
+ return padConstVal == std::numeric_limits<int32_t>::lowest();
----------------
GeorgeARM wrote:
Good point. I will fix.
https://github.com/llvm/llvm-project/pull/132700