[Mlir-commits] [mlir] b4e0507 - Replace uses of RewritePatternSet::insert with add

Tres Popp llvmlistbot at llvm.org
Fri Feb 18 03:18:49 PST 2022


Author: Tres Popp
Date: 2022-02-18T12:18:41+01:00
New Revision: b4e0507ce018eaf42f3e1a728e7bf22d7cae5514

URL: https://github.com/llvm/llvm-project/commit/b4e0507ce018eaf42f3e1a728e7bf22d7cae5514
DIFF: https://github.com/llvm/llvm-project/commit/b4e0507ce018eaf42f3e1a728e7bf22d7cae5514.diff

LOG: Replace uses of RewritePatternSet::insert with add

RewritePatternSet::insert is soft-deprecated, so remove all remaining uses
to make it less likely to be picked up in new code and easier to remove
entirely in the future.

Differential Revision: https://reviews.llvm.org/D120021
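
For illustration, a typical call site after this change looks like the sketch
below (MyOp, MyPattern, and OtherPattern are hypothetical names used only for
this example; add accepts the same pattern template arguments and constructor
arguments that insert did, only the method name differs):

    void MyOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                           MLIRContext *context) {
      // Previously spelled: results.insert<MyPattern, OtherPattern>(context);
      // add is the preferred spelling and forwards the same arguments to the
      // pattern constructors.
      results.add<MyPattern, OtherPattern>(context);
    }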

Added: 
    

Modified: 
    mlir/include/mlir/IR/PatternMatch.h
    mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
    mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
    mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
    mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
    mlir/lib/Dialect/SCF/SCF.cpp
    mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
    mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp
    mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
    mlir/unittests/Transforms/Canonicalizer.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/IR/PatternMatch.h b/mlir/include/mlir/IR/PatternMatch.h
index 4f728c2e30644..8fd9fa0caaf3c 100644
--- a/mlir/include/mlir/IR/PatternMatch.h
+++ b/mlir/include/mlir/IR/PatternMatch.h
@@ -1061,7 +1061,7 @@ class RewritePatternSet {
     private:
       LogicalResult (*implFn)(OpType, PatternRewriter &rewriter);
     };
-    insert(std::make_unique<FnPattern>(std::move(implFn), getContext()));
+    add(std::make_unique<FnPattern>(std::move(implFn), getContext()));
     return *this;
   }
 

diff --git a/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp b/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
index 4a3ba46233ff8..1a9254e9db896 100644
--- a/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
@@ -370,7 +370,7 @@ LLVM::LLVMStructType mlir::convertMMAToLLVMType(gpu::MMAMatrixType type) {
 
 void mlir::populateGpuWMMAToNVVMConversionPatterns(
     LLVMTypeConverter &converter, RewritePatternSet &patterns) {
-  patterns.insert<WmmaLoadOpToNVVMLowering, WmmaMmaOpToNVVMLowering,
-                  WmmaStoreOpToNVVMLowering, WmmaConstantOpToNVVMLowering,
-                  WmmaElementwiseOpToNVVMLowering>(converter);
+  patterns.add<WmmaLoadOpToNVVMLowering, WmmaMmaOpToNVVMLowering,
+               WmmaStoreOpToNVVMLowering, WmmaConstantOpToNVVMLowering,
+               WmmaElementwiseOpToNVVMLowering>(converter);
 }

diff --git a/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp b/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
index fffc1b98b0087..8074f2c4751fb 100644
--- a/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
+++ b/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
@@ -210,7 +210,7 @@ OpFoldResult arith::AddIOp::fold(ArrayRef<Attribute> operands) {
 
 void arith::AddIOp::getCanonicalizationPatterns(
     RewritePatternSet &patterns, MLIRContext *context) {
-  patterns.insert<AddIAddConstant, AddISubConstantRHS, AddISubConstantLHS>(
+  patterns.add<AddIAddConstant, AddISubConstantRHS, AddISubConstantLHS>(
       context);
 }
 
@@ -232,9 +232,10 @@ OpFoldResult arith::SubIOp::fold(ArrayRef<Attribute> operands) {
 
 void arith::SubIOp::getCanonicalizationPatterns(
     RewritePatternSet &patterns, MLIRContext *context) {
-  patterns.insert<SubIRHSAddConstant, SubILHSAddConstant, SubIRHSSubConstantRHS,
-                  SubIRHSSubConstantLHS, SubILHSSubConstantRHS,
-                  SubILHSSubConstantLHS>(context);
+  patterns
+      .add<SubIRHSAddConstant, SubILHSAddConstant, SubIRHSSubConstantRHS,
+           SubIRHSSubConstantLHS, SubILHSSubConstantRHS, SubILHSSubConstantLHS>(
+          context);
 }
 
 //===----------------------------------------------------------------------===//
@@ -568,7 +569,7 @@ OpFoldResult arith::XOrIOp::fold(ArrayRef<Attribute> operands) {
 
 void arith::XOrIOp::getCanonicalizationPatterns(
     RewritePatternSet &patterns, MLIRContext *context) {
-  patterns.insert<XOrINotCmpI>(context);
+  patterns.add<XOrINotCmpI>(context);
 }
 
 //===----------------------------------------------------------------------===//
@@ -913,7 +914,7 @@ bool arith::ExtSIOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
 
 void arith::ExtSIOp::getCanonicalizationPatterns(
     RewritePatternSet &patterns, MLIRContext *context) {
-  patterns.insert<ExtSIOfExtUI>(context);
+  patterns.add<ExtSIOfExtUI>(context);
 }
 
 LogicalResult arith::ExtSIOp::verify() {
@@ -1007,7 +1008,7 @@ LogicalResult arith::TruncFOp::verify() {
 
 void arith::AndIOp::getCanonicalizationPatterns(
     RewritePatternSet &patterns, MLIRContext *context) {
-  patterns.insert<AndOfExtUI, AndOfExtSI>(context);
+  patterns.add<AndOfExtUI, AndOfExtSI>(context);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1016,7 +1017,7 @@ void arith::AndIOp::getCanonicalizationPatterns(
 
 void arith::OrIOp::getCanonicalizationPatterns(
     RewritePatternSet &patterns, MLIRContext *context) {
-  patterns.insert<OrOfExtUI, OrOfExtSI>(context);
+  patterns.add<OrOfExtUI, OrOfExtSI>(context);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1155,7 +1156,7 @@ OpFoldResult arith::IndexCastOp::fold(ArrayRef<Attribute> operands) {
 
 void arith::IndexCastOp::getCanonicalizationPatterns(
     RewritePatternSet &patterns, MLIRContext *context) {
-  patterns.insert<IndexCastOfIndexCast, IndexCastOfExtSI>(context);
+  patterns.add<IndexCastOfIndexCast, IndexCastOfExtSI>(context);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1204,7 +1205,7 @@ OpFoldResult arith::BitcastOp::fold(ArrayRef<Attribute> operands) {
 
 void arith::BitcastOp::getCanonicalizationPatterns(
     RewritePatternSet &patterns, MLIRContext *context) {
-  patterns.insert<BitcastOfBitcast>(context);
+  patterns.add<BitcastOfBitcast>(context);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1461,7 +1462,7 @@ struct SelectToExtUI : public OpRewritePattern<arith::SelectOp> {
 
 void arith::SelectOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                   MLIRContext *context) {
-  results.insert<SelectI1Simplify, SelectToExtUI>(context);
+  results.add<SelectI1Simplify, SelectToExtUI>(context);
 }
 
 OpFoldResult arith::SelectOp::fold(ArrayRef<Attribute> operands) {

diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index c5a99d820bc90..9b3342a6e801e 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -170,7 +170,7 @@ struct SimplifyClones : public OpRewritePattern<CloneOp> {
 
 void CloneOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                           MLIRContext *context) {
-  results.insert<SimplifyClones>(context);
+  results.add<SimplifyClones>(context);
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index c148ab9bcfa7f..6b0d22c8f939e 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1702,11 +1702,11 @@ struct TiledLoopResultsFolder : public OpRewritePattern<linalg::TiledLoopOp> {
 
 void TiledLoopOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                               MLIRContext *context) {
-  results.insert<TiledLoopInputsFolder, TiledLoopResultsFolder,
-                 DimOfTiledLoopInsOutsFolder<tensor::DimOp>,
-                 DimOfTiledLoopInsOutsFolder<memref::DimOp>,
-                 DimOfTiledLoopResultFolder<tensor::DimOp>,
-                 DimOfTiledLoopResultFolder<memref::DimOp>>(context);
+  results.add<TiledLoopInputsFolder, TiledLoopResultsFolder,
+              DimOfTiledLoopInsOutsFolder<tensor::DimOp>,
+              DimOfTiledLoopInsOutsFolder<memref::DimOp>,
+              DimOfTiledLoopResultFolder<tensor::DimOp>,
+              DimOfTiledLoopResultFolder<memref::DimOp>>(context);
 }
 
 LogicalResult TiledLoopOp::fold(ArrayRef<Attribute>,

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
index 77e522ad280c0..afe82ff8bdfac 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
@@ -533,9 +533,9 @@ struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
       return false;
     });
 
-    patterns.insert<DetensorizeGenericOp>(typeConverter, context);
-    patterns.insert<FunctionNonEntryBlockConversion>(context, typeConverter,
-                                                     blockArgsToDetensor);
+    patterns.add<DetensorizeGenericOp>(typeConverter, context);
+    patterns.add<FunctionNonEntryBlockConversion>(context, typeConverter,
+                                                  blockArgsToDetensor);
     // Since non-entry block arguments get detensorized, we also need to
     // update the control flow inside the function to reflect the correct
     // types.

diff --git a/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp b/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
index 8de484c3b86e7..c2937e560ec2f 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
@@ -129,5 +129,5 @@ struct ComposeSubViewOpPattern : public OpRewritePattern<memref::SubViewOp> {
 
 void mlir::memref::populateComposeSubViewPatterns(
     RewritePatternSet &patterns, MLIRContext *context) {
-  patterns.insert<ComposeSubViewOpPattern>(context);
+  patterns.add<ComposeSubViewOpPattern>(context);
 }

diff --git a/mlir/lib/Dialect/SCF/SCF.cpp b/mlir/lib/Dialect/SCF/SCF.cpp
index 258ead87848f5..54c93e93f37e8 100644
--- a/mlir/lib/Dialect/SCF/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/SCF.cpp
@@ -2844,9 +2844,9 @@ struct WhileUnusedArg : public OpRewritePattern<WhileOp> {
 
 void WhileOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                           MLIRContext *context) {
-  results.insert<RemoveLoopInvariantArgsFromBeforeBlock,
-                 RemoveLoopInvariantValueYielded, WhileConditionTruth,
-                 WhileCmpCond, WhileUnusedResult>(context);
+  results.add<RemoveLoopInvariantArgsFromBeforeBlock,
+              RemoveLoopInvariantValueYielded, WhileConditionTruth,
+              WhileCmpCond, WhileUnusedResult>(context);
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
index 4bfbd617cbd04..7e422b0b4a41c 100644
--- a/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
@@ -196,12 +196,11 @@ void mlir::scf::populateSCFForLoopCanonicalizationPatterns(
     RewritePatternSet &patterns) {
   MLIRContext *ctx = patterns.getContext();
   patterns
-      .insert<AffineOpSCFCanonicalizationPattern<AffineMinOp, /*IsMin=*/true>,
-              AffineOpSCFCanonicalizationPattern<AffineMaxOp, /*IsMin=*/false>,
-              DimOfIterArgFolder<tensor::DimOp>,
-              DimOfIterArgFolder<memref::DimOp>,
-              DimOfLoopResultFolder<tensor::DimOp>,
-              DimOfLoopResultFolder<memref::DimOp>>(ctx);
+      .add<AffineOpSCFCanonicalizationPattern<AffineMinOp, /*IsMin=*/true>,
+           AffineOpSCFCanonicalizationPattern<AffineMaxOp, /*IsMin=*/false>,
+           DimOfIterArgFolder<tensor::DimOp>, DimOfIterArgFolder<memref::DimOp>,
+           DimOfLoopResultFolder<tensor::DimOp>,
+           DimOfLoopResultFolder<memref::DimOp>>(ctx);
 }
 
 std::unique_ptr<Pass> mlir::createSCFForLoopCanonicalizationPass() {

diff --git a/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp b/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp
index 0f07e547a7a39..ee614fd27a769 100644
--- a/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp
+++ b/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp
@@ -110,7 +110,7 @@ class ReturnOpTypeConversion : public OpConversionPattern<ReturnOp> {
 void mlir::populateBranchOpInterfaceTypeConversionPattern(
     RewritePatternSet &patterns, TypeConverter &typeConverter,
     function_ref<bool(BranchOpInterface, int)> shouldConvertBranchOperand) {
-  patterns.insert<BranchOpInterfaceTypeConversion>(
+  patterns.add<BranchOpInterfaceTypeConversion>(
       typeConverter, patterns.getContext(), shouldConvertBranchOperand);
 }
 

diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index e4e7acbb1429e..37375184a4394 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -130,7 +130,7 @@ struct ConcatOptimization : public OpRewritePattern<tosa::ConcatOp> {
 
 void ConcatOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                            MLIRContext *context) {
-  results.insert<ConcatOptimization>(context);
+  results.add<ConcatOptimization>(context);
 }
 
 struct ReshapeReshapeOptimization : public OpRewritePattern<tosa::ReshapeOp> {
@@ -188,8 +188,8 @@ struct ReshapeConstOptimization : public OpRewritePattern<tosa::ReshapeOp> {
 
 void ReshapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {
-  results.insert<ReshapeReshapeOptimization>(context);
-  results.insert<ReshapeConstOptimization>(context);
+  results.add<ReshapeReshapeOptimization>(context);
+  results.add<ReshapeConstOptimization>(context);
 }
 
 struct ConstantTransposeOptimization
@@ -285,8 +285,8 @@ struct NoOpOptimization : public OpRewritePattern<tosa::TransposeOp> {
 
 void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                               MLIRContext *context) {
-  results.insert<ConstantTransposeOptimization>(context);
-  results.insert<NoOpOptimization>(context);
+  results.add<ConstantTransposeOptimization>(context);
+  results.add<NoOpOptimization>(context);
 }
 
 struct AddZeroOptimization : public OpRewritePattern<tosa::AddOp> {
@@ -323,7 +323,7 @@ struct AddZeroOptimization : public OpRewritePattern<tosa::AddOp> {
 
 void AddOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
-  results.insert<AddZeroOptimization>(context);
+  results.add<AddZeroOptimization>(context);
 }
 
 struct MulOneOptimization : public OpRewritePattern<tosa::MulOp> {
@@ -372,7 +372,7 @@ struct MulOneOptimization : public OpRewritePattern<tosa::MulOp> {
 
 void MulOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
-  results.insert<MulOneOptimization>(context);
+  results.add<MulOneOptimization>(context);
 }
 
 struct MaterializePadValue : public OpRewritePattern<tosa::PadOp> {
@@ -419,7 +419,7 @@ struct MaterializePadValue : public OpRewritePattern<tosa::PadOp> {
 
 void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
-  results.insert<MaterializePadValue>(context);
+  results.add<MaterializePadValue>(context);
 }
 
 struct MaxPool2dIsNoOp : public OpRewritePattern<tosa::MaxPool2dOp> {
@@ -454,7 +454,7 @@ struct MaxPool2dIsNoOp : public OpRewritePattern<tosa::MaxPool2dOp> {
 
 void MaxPool2dOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                               MLIRContext *context) {
-  results.insert<MaxPool2dIsNoOp>(context);
+  results.add<MaxPool2dIsNoOp>(context);
 }
 
 struct ClampIsNoOp : public OpRewritePattern<tosa::ClampOp> {
@@ -556,8 +556,8 @@ struct ClampClampOptimization : public OpRewritePattern<tosa::ClampOp> {
 
 void ClampOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                           MLIRContext *context) {
-  results.insert<ClampIsNoOp>(context);
-  results.insert<ClampClampOptimization>(context);
+  results.add<ClampIsNoOp>(context);
+  results.add<ClampClampOptimization>(context);
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
index 4c412f987899e..ac8583f7c03e2 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
@@ -111,5 +111,5 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
 
 void mlir::tosa::populateTosaDecomposeConv2D(MLIRContext *ctx,
                                              RewritePatternSet &patterns) {
-  patterns.insert<Conv2DIsFullyConnected>(ctx);
+  patterns.add<Conv2DIsFullyConnected>(ctx);
 }

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
index 685f97353d746..2ce9f24e6d9c9 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
@@ -117,5 +117,5 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
 
 void mlir::tosa::populateTosaDecomposeDepthwise(MLIRContext *ctx,
                                                 RewritePatternSet &patterns) {
-  patterns.insert<DepthwiseConv2DIsMul>(ctx);
+  patterns.add<DepthwiseConv2DIsMul>(ctx);
 }

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
index 330add9e248ea..d6ffa463f31bd 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -375,6 +375,6 @@ class TransposeConvStridedConverter
 
 void mlir::tosa::populateTosaDecomposeTransposeConv(
     MLIRContext *ctx, RewritePatternSet &patterns) {
-  patterns.insert<TransposeConvDilatedConverter>(ctx);
-  patterns.insert<TransposeConvStridedConverter>(ctx);
+  patterns.add<TransposeConvDilatedConverter>(ctx);
+  patterns.add<TransposeConvStridedConverter>(ctx);
 }

diff --git a/mlir/unittests/Transforms/Canonicalizer.cpp b/mlir/unittests/Transforms/Canonicalizer.cpp
index 71d7be9bef148..f3b0ad821b03b 100644
--- a/mlir/unittests/Transforms/Canonicalizer.cpp
+++ b/mlir/unittests/Transforms/Canonicalizer.cpp
@@ -58,7 +58,7 @@ struct TestDialect : public Dialect {
   }
 
   void getCanonicalizationPatterns(RewritePatternSet &results) const override {
-    results.insert<DisabledPattern, EnabledPattern>(results.getContext());
+    results.add<DisabledPattern, EnabledPattern>(results.getContext());
   }
 };
 


        

