[Mlir-commits] [mlir] 2031d7d - [mlir][Tensor] Drop SplitPaddingPatterns.

Nicolas Vasilache llvmlistbot at llvm.org
Thu Apr 13 03:41:15 PDT 2023


Author: Nicolas Vasilache
Date: 2023-04-13T03:38:29-07:00
New Revision: 2031d7d66dc8bf7ce2168edf6eef6ba568c16d4f

URL: https://github.com/llvm/llvm-project/commit/2031d7d66dc8bf7ce2168edf6eef6ba568c16d4f
DIFF: https://github.com/llvm/llvm-project/commit/2031d7d66dc8bf7ce2168edf6eef6ba568c16d4f.diff

LOG: [mlir][Tensor] Drop SplitPaddingPatterns.

These old patterns are not used in either MLIR or downstream projects, except for one test.
Additionally, this logic is redundant with the tensor.pad tiling implementation.

Drop SplitPaddingPatterns to reduce entropy.

Differential Revision: https://reviews.llvm.org/D148207

Added: 
    

Modified: 
    clang/docs/tools/clang-formatted-files.txt
    mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
    mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
    mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp

Removed: 
    mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
    mlir/test/Dialect/Tensor/split-padding.mlir


################################################################################
diff  --git a/clang/docs/tools/clang-formatted-files.txt b/clang/docs/tools/clang-formatted-files.txt
index 4b70e78cb1131..f48921d9f878a 100644
--- a/clang/docs/tools/clang-formatted-files.txt
+++ b/clang/docs/tools/clang-formatted-files.txt
@@ -8250,7 +8250,6 @@ mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
 mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
 mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
 mlir/lib/Dialect/Tensor/Transforms/PassDetail.h
-mlir/lib/Dialect/Tensor/Transforms/SplitPadding.cpp
 mlir/lib/Dialect/Tensor/Utils/Utils.cpp
 mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
 mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp

diff  --git a/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
index c0c46e9981dfa..a3b5abf08fd7a 100644
--- a/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
@@ -41,12 +41,6 @@ void populateExpandOpsPatterns(RewritePatternSet &patterns);
 /// ops into `patterns`.
 void populateFoldTensorSubsetOpPatterns(RewritePatternSet &patterns);
 
-/// Populates `patterns` with patterns to wrap a tensor.pad op with an scf.if op
-/// to separate the cases where we don't need padding (all pad sizes are
-/// actually zeros) and where we indeed need padding.
-void populateSplitPaddingPatterns(RewritePatternSet &patterns,
-                                  PatternBenefit baseBenefit = 1);
-
 /// Collects patterns to merge consecutive tensor.insert_slice/extract_slice
 /// into one. These patterns are in in this separate entry point because the
 /// bufferization is sensitive over IR structure, particularly those

diff  --git a/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
index 9f6780730dc71..44579546f7ea5 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
@@ -7,7 +7,6 @@ add_mlir_dialect_library(MLIRTensorTransforms
   FoldTensorSubsetOps.cpp
   MergeConsecutiveInsertExtractSlicePatterns.cpp
   ReshapePatterns.cpp
-  SplitPaddingPatterns.cpp
   SwapExtractSliceWithProducerPatterns.cpp
 
   ADDITIONAL_HEADER_DIRS

diff  --git a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
deleted file mode 100644
index 9536f3233b814..0000000000000
--- a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-//===- SplitPaddingPatterns.cpp - Splitting tensor.pad Op -----------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements patterns to wrap a tensor.pad op with an scf.if op
-/// to separate the cases where we don't need padding (all pad sizes are
-/// actually zeros) and where we indeed need padding.
-//
-//===----------------------------------------------------------------------===//
-
-#include "mlir/Dialect/Arith/IR/Arith.h"
-#include "mlir/Dialect/SCF/IR/SCF.h"
-#include "mlir/Dialect/Tensor/IR/Tensor.h"
-#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
-#include "mlir/Dialect/Utils/StaticValueUtils.h"
-#include "mlir/IR/PatternMatch.h"
-#include "llvm/Support/Debug.h"
-
-#define DEBUG_TYPE "mlir-tensor-split-padding"
-
-using namespace mlir;
-
-/// Returns true if the the given `attrOrValue` is a constant zero.
-static bool isZero(OpFoldResult attrOrValue) {
-  if (std::optional<int64_t> val = getConstantIntValue(attrOrValue))
-    return *val == 0;
-  return false;
-}
-
-/// Gets the given `attrOrValue` as a Value by creating constant ops for
-/// attributes.
-static Value getAsValue(OpFoldResult attrOrValue, OpBuilder &builder,
-                        Location loc) {
-  if (Value val = attrOrValue.dyn_cast<Value>())
-    return val;
-  auto attr = attrOrValue.get<Attribute>().cast<IntegerAttr>();
-  return builder.create<arith::ConstantIndexOp>(loc, attr.getInt());
-}
-
-namespace {
-
-struct SplitPadding final : public OpRewritePattern<tensor::PadOp> {
-  using OpRewritePattern::OpRewritePattern;
-
-  LogicalResult matchAndRewrite(tensor::PadOp padOp,
-                                PatternRewriter &rewriter) const override {
-    // Avoid infinitely applying this pattern.
-    if (padOp->getParentOfType<scf::IfOp>())
-      return failure();
-
-    // If all padding sizes are zero, we don't need to do anything.
-    SmallVector<OpFoldResult> lowPads = padOp.getMixedLowPad();
-    SmallVector<OpFoldResult> highPads = padOp.getMixedHighPad();
-    if (llvm::all_of(lowPads, isZero) && llvm::all_of(highPads, isZero))
-      return failure();
-
-    // Build the condition for the scf.if op: all pad sizes are zero.
-    Location loc = padOp.getLoc();
-    Value cstZero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
-    SmallVector<Value> eqZeroCmpVals;
-    for (OpFoldResult pad : llvm::concat<OpFoldResult>(lowPads, highPads)) {
-      if (!isZero(pad))
-        eqZeroCmpVals.push_back(rewriter.create<arith::CmpIOp>(
-            loc, arith::CmpIPredicate::eq, getAsValue(pad, rewriter, loc),
-            cstZero));
-    }
-    Value ifCond = eqZeroCmpVals.front();
-    for (Value cmp : llvm::ArrayRef(eqZeroCmpVals).drop_front())
-      ifCond = rewriter.create<arith::AndIOp>(loc, ifCond, cmp);
-
-    // Build the scf.if op itself. For the "then" branch, we can elide the
-    // padding. For the "else" branch, we retain the clone op.
-    auto thenBuilder = [&padOp](OpBuilder &builder, Location loc) {
-      builder.create<scf::YieldOp>(loc, padOp.getSource());
-    };
-    auto elseBuilder = [&padOp](OpBuilder &builder, Location loc) {
-      Operation *newOp = builder.clone(*padOp);
-      builder.create<scf::YieldOp>(loc, newOp->getResults());
-    };
-    rewriter.replaceOpWithNewOp<scf::IfOp>(padOp, ifCond, thenBuilder,
-                                           elseBuilder);
-    return success();
-  }
-};
-
-} // namespace
-
-void tensor::populateSplitPaddingPatterns(RewritePatternSet &patterns,
-                                          PatternBenefit baseBenefit) {
-  patterns.add<SplitPadding>(patterns.getContext(), baseBenefit);
-}

diff  --git a/mlir/test/Dialect/Tensor/split-padding.mlir b/mlir/test/Dialect/Tensor/split-padding.mlir
deleted file mode 100644
index fd971eeb64672..0000000000000
--- a/mlir/test/Dialect/Tensor/split-padding.mlir
+++ /dev/null
@@ -1,44 +0,0 @@
-// RUN: mlir-opt -split-input-file -test-tensor-transform-patterns=test-split-padding-patterns %s | FileCheck %s
-
-// CHECK-LABEL: func @pad_all_zero_sizes
-func.func @pad_all_zero_sizes(%input: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
-  %f0 = arith.constant 0.0 : f32
-  %c0 = arith.constant 0 : index
-  %0 = tensor.pad %input low[0, %c0, 0] high[%c0, 0, 0] {
-  ^bb0(%dim0: index, %dim1: index, %dim2: index):
-    tensor.yield %f0 : f32
-  } : tensor<?x?x?xf32> to tensor<?x?x?xf32>
-  return %0 : tensor<?x?x?xf32>
-}
-
-// CHECK-NOT: scf.if
-//     CHECK: tensor.pad
-
-// -----
-
-// CHECK-LABEL: func @pad_non_zero_sizes
-//  CHECK-SAME: (%[[INPUT:.+]]: tensor<?x?x8xf32>, %[[LOW0:.+]]: index, %[[HIGH1:.+]]: index)
-func.func @pad_non_zero_sizes(%input: tensor<?x?x8xf32>, %low0: index, %high1: index) -> tensor<?x?x8xf32> {
-  %f0 = arith.constant 0.0 : f32
-  %0 = tensor.pad %input low[%low0, 0, 0] high[0, %high1, 0] {
-  ^bb0(%dim0: index, %dim1: index, %dim2: index):
-    tensor.yield %f0 : f32
-  } : tensor<?x?x8xf32> to tensor<?x?x8xf32>
-  return %0 : tensor<?x?x8xf32>
-}
-
-// CHECK-DAG: %[[F0:.+]] = arith.constant 0.000000e+00 : f32
-// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
-// CHECK: %[[EQ0:.+]] = arith.cmpi eq, %[[LOW0]], %[[C0]] : index
-// CHECK: %[[EQ1:.+]] = arith.cmpi eq, %[[HIGH1]], %[[C0]] : index
-// CHECK: %[[AND:.+]] = arith.andi %[[EQ0]], %[[EQ1]] : i1
-// CHECK: %[[IF:.+]] = scf.if %[[AND]] -> (tensor<?x?x8xf32>) {
-// CHECK:   scf.yield %[[INPUT]] : tensor<?x?x8xf32>
-// CHECK: } else {
-// CHECK:   %[[PAD:.+]] = tensor.pad %[[INPUT]] low[%[[LOW0]], 0, 0] high[0, %[[HIGH1]], 0]  {
-// CHECK:   ^bb0(%{{.+}}: index, %{{.+}}: index, %{{.+}}: index):
-// CHECK:     tensor.yield %[[F0]] : f32
-// CHECK:   } : tensor<?x?x8xf32> to tensor<?x?x8xf32>
-// CHECK:   scf.yield %[[PAD]] : tensor<?x?x8xf32>
-// CHECK: }
-// CHECK: return %[[IF]] : tensor<?x?x8xf32>

diff  --git a/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp b/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
index 38988923fef67..6dc8b4a27d29b 100644
--- a/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
@@ -43,11 +43,6 @@ struct TestTensorTransforms
 
   void runOnOperation() override;
 
-  Option<bool> testSplitPaddingPatterns{
-      *this, "test-split-padding-patterns",
-      llvm::cl::desc("Test patterns to split tensor.pad ops"),
-      llvm::cl::init(false)};
-
   Option<bool> testFoldConstantExtractSlice{
       *this, "test-fold-constant-extract-slice",
       llvm::cl::desc("Test folding arith.constant and tensor.extract_slice"),
@@ -111,12 +106,6 @@ static void applyFoldIntoPackAndUnpackPatterns(Operation *rootOp) {
   (void)applyPatternsAndFoldGreedily(rootOp, std::move(patterns));
 }
 
-static void applySplitPaddingPatterns(Operation *rootOp) {
-  RewritePatternSet patterns(rootOp->getContext());
-  tensor::populateSplitPaddingPatterns(patterns);
-  (void)applyPatternsAndFoldGreedily(rootOp, std::move(patterns));
-}
-
 static void applyFoldConstantExtractSlicePatterns(Operation *rootOp) {
   RewritePatternSet patterns(rootOp->getContext());
   tensor::ControlConstantExtractSliceFusionFn controlFn =
@@ -291,8 +280,6 @@ void TestTensorTransforms::runOnOperation() {
   Operation *rootOp = getOperation();
   if (testSimplifyPackPatterns)
     applySimplifyPackPatterns(rootOp);
-  if (testSplitPaddingPatterns)
-    applySplitPaddingPatterns(rootOp);
   if (testFoldConstantExtractSlice)
     applyFoldConstantExtractSlicePatterns(rootOp);
   if (testFoldConsecutiveInsertExtractSlice)


        


More information about the Mlir-commits mailing list