[Mlir-commits] [mlir] bf28849 - [mlir][linalg] Retire PoolingMaxOp/PoolingMinOp/PoolingSumOp.

Tobias Gysi llvmlistbot at llvm.org
Fri Oct 1 06:52:44 PDT 2021


Author: Tobias Gysi
Date: 2021-10-01T13:51:56Z
New Revision: bf2884974584c65481439b4e44527d0fb7dddb79

URL: https://github.com/llvm/llvm-project/commit/bf2884974584c65481439b4e44527d0fb7dddb79
DIFF: https://github.com/llvm/llvm-project/commit/bf2884974584c65481439b4e44527d0fb7dddb79.diff

LOG: [mlir][linalg] Retire PoolingMaxOp/PoolingMinOp/PoolingSumOp.

The pooling ops are among the last remaining hard-coded Linalg operations that have no region attached. They became obsolete with the introduction of the OpDSL pooling operations. Removing them allows us to delete specialized code and tests that are not needed by the OpDSL counterparts, which rely on the standard code paths.
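
For readers unfamiliar with the OpDSL counterparts, here is a minimal sketch of what a pooling computation looks like with the generated named ops, assuming the linalg.pooling_nhwc_max op and illustrative shapes and attribute values (this is not code from the commit below):

    linalg.pooling_nhwc_max {dilations = dense<1> : tensor<2xi64>,
                             strides = dense<1> : tensor<2xi64>}
      ins(%input, %window : memref<1x4x4x1xf32>, memref<3x3xf32>)
      outs(%output : memref<1x2x2x1xf32>)

The named op's body is generated from its OpDSL definition, so it goes through the standard structured-op code paths, which is what makes the specialized lowering deleted here unnecessary.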

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D110909

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
    mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
    mlir/test/Dialect/Linalg/affine.mlir
    mlir/test/Dialect/Linalg/invalid.mlir
    mlir/test/Dialect/Linalg/loops.mlir
    mlir/test/Dialect/Linalg/roundtrip.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
index 557cefe808af1..2d65106eef4e8 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
@@ -36,9 +36,6 @@ namespace linalg {
 
 class ConvOp;
 class LinalgOp;
-class PoolingMaxOp;
-class PoolingMinOp;
-class PoolingSumOp;
 
 // TODO: allow an extra ValueRange to specify an indexing and allow
 // non-hyperrectangular shapes.

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index ce0ca19b5dc2c..bf8d90020889d 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -417,89 +417,6 @@ def ConvOp : PoolingBase_Op<"conv", []> {
   let hasFolder = 1;
 }
 
-// Only support buffer semantics.
-class SingleInputPoolingBase_Op<string mnemonic>
-    : PoolingBase_Op<mnemonic, []> {
-  let description = [{
-    A base class for single input pooling function.
-
-    TODO: Figure out a better way to handle window dimensions, i.e., eliminate
-    the fake memref.
-    The window dimensions are specified by argument `windowDims`. The i-th
-    dimension in the shape of `windowDims` denotes the size of the window along
-    dimension i. For example, if the window size is 2x3, then a memref<2x3>
-    should be passed to the operation as `windowDims`.
-  }];
-
-  let arguments = (ins AnyStridedMemRef:$input,
-                   AnyStridedMemRef:$windowDims,
-                   AnyStridedMemRef:$output,
-                   OptionalAttr<I64ArrayAttr>:$strides,
-                   OptionalAttr<I64ArrayAttr>:$dilations,
-                   OptionalAttr<I64ElementsAttr>:$padding);
-
-  let extraClassDeclaration = commonUtils# [{
-    ValueRange inputs() { return getOperands().slice(0, 2); }
-    ValueRange outputs() { return getOperands().take_back(); }
-
-    ArrayAttr iterator_types() {
-      // Outer parallel loops are always the number of output dimensions.
-      int64_t nPar = getRank(getOutputOperand(0));
-      // The window loops have the same number of loops as the output dimensions.
-      unsigned nWin = nPar;
-      SmallVector<StringRef, 8> iters(nPar, getParallelIteratorTypeName());
-      iters.reserve(nPar + nWin);
-      iters.append(nWin, getWindowIteratorTypeName());
-      return Builder(getContext()).getStrArrayAttr(iters);
-    }
-
-    ArrayAttr indexing_maps() {
-      MLIRContext *context = getContext();
-      auto nPar = getNumParallelLoops();
-      auto nWin = getNumWindowLoops();
-      assert(nWin > 0 && "expected at least one window dimension");
-      unsigned idx = 0;
-      auto outputDims = makeAffineDimExprs(nPar, idx, context);
-      auto windowDims = makeAffineDimExprs(nWin, idx, context);
-      // Construct the weightedSum expression.
-      auto inputDims =
-          weightedPoolingInputIndex(*this, outputDims, windowDims);
-      return Builder(getContext()).getAffineMapArrayAttr({
-        // input
-        AffineMap::get(idx, 0, inputDims, context),
-        // windowDims
-        AffineMap::get(idx, 0, windowDims, context),
-        // output
-        AffineMap::get(idx, 0, outputDims, context)});
-    }
-  }];
-
-  let verifier = [{ return ::verify(*this); }];
-
-  let hasFolder = 1;
-}
-
-def PoolingMaxOp: SingleInputPoolingBase_Op<"pooling_max"> {
-  let description = [{
-    Takes max op as pooling operation, i.e., it samples the maximum value in the
-    window.
-  }];
-}
-
-def PoolingMinOp: SingleInputPoolingBase_Op<"pooling_min"> {
-  let description = [{
-    Takes min op as pooling operation, i.e., it samples the minimum value in the
-    window.
-  }];
-}
-
-def PoolingSumOp: SingleInputPoolingBase_Op<"pooling_sum"> {
-  let description = [{
-    Takes add op as pooling operation, i.e., it accumulates the values in the
-    window.
-  }];
-}
-
 //===----------------------------------------------------------------------===//
 // Generic Linalg ops.
 //===----------------------------------------------------------------------===//
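
For context, the iterator types and indexing maps that the deleted base class computed can be written out directly on the standard code path as a linalg.generic with an explicit region. The following is a rough, hand-written sketch of 2-D max pooling with stride 2 along the first dimension; the maps, shapes, and the use of reduction iterators are illustrative assumptions, not code from this commit:

    #pool_input  = affine_map<(o0, o1, w0, w1) -> (o0 * 2 + w0, o1 + w1)>
    #pool_window = affine_map<(o0, o1, w0, w1) -> (w0, w1)>
    #pool_output = affine_map<(o0, o1, w0, w1) -> (o0, o1)>
    linalg.generic {indexing_maps = [#pool_input, #pool_window, #pool_output],
                    iterator_types = ["parallel", "parallel", "reduction", "reduction"]}
        ins(%input, %window : memref<?x?xf32>, memref<?x?xf32>)
        outs(%output : memref<?x?xf32>) {
      ^bb0(%in: f32, %win: f32, %out: f32):
        // Keep the running maximum in the output buffer.
        %pred = cmpf ogt, %in, %out : f32
        %max = select %pred, %in, %out : f32
        linalg.yield %max : f32
    }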

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 92581a41bf19c..4a05c577b3d3c 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -2638,55 +2638,6 @@ static LogicalResult verify(ConvOp op) {
   return success();
 }
 
-template <typename PoolingOp>
-static LogicalResult verifySingleInputPoolingOp(PoolingOp op) {
-  auto inputType = op.input().getType().template cast<MemRefType>();
-  auto outputType = op.output().getType().template cast<MemRefType>();
-  if (outputType.getElementType() != inputType.getElementType())
-    return op.emitOpError("expects memref elemental types to match");
-
-  auto windowDimsType = op.windowDims().getType().template cast<MemRefType>();
-  if (outputType.getRank() != inputType.getRank() ||
-      outputType.getRank() != windowDimsType.getRank())
-    return op.emitOpError("expects memref ranks to match");
-
-  if (auto strides = op.strides()) {
-    if (failed(verifyStrideOrDilation(op, strides->getValue(),
-                                      /*isStride=*/true)))
-      return failure();
-  }
-  if (auto dilations = op.dilations()) {
-    if (failed(verifyStrideOrDilation(op, dilations->getValue(),
-                                      /*isStride=*/false)))
-      return failure();
-  }
-  return success();
-}
-
-#define DEFINE_POOLING_OP_GET_EFFECTS(OP_NAME)                                 \
-  void OP_NAME::getEffects(                                                    \
-      SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>      \
-          &effects) {                                                          \
-    effects.emplace_back(MemoryEffects::Read::get(), input(),                  \
-                         SideEffects::DefaultResource::get());                 \
-    effects.emplace_back(MemoryEffects::Write::get(), output(),                \
-                         SideEffects::DefaultResource::get());                 \
-  }
-
-static LogicalResult verify(PoolingMaxOp op) {
-  return verifySingleInputPoolingOp(op);
-}
-static LogicalResult verify(PoolingMinOp op) {
-  return verifySingleInputPoolingOp(op);
-}
-static LogicalResult verify(PoolingSumOp op) {
-  return verifySingleInputPoolingOp(op);
-}
-
-DEFINE_POOLING_OP_GET_EFFECTS(PoolingMaxOp)
-DEFINE_POOLING_OP_GET_EFFECTS(PoolingMinOp)
-DEFINE_POOLING_OP_GET_EFFECTS(PoolingSumOp)
-
 #include "mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yamlgen.cpp.inc"
 
 #define GET_OP_CLASSES
@@ -2756,9 +2707,6 @@ mlir::linalg::weightedPoolingInputIndex(PoolingOp op,
       ArrayRef<AffineExpr> windowDims);
 
 INSTANTIATE_WEIGHTED_POOLING_INPUT_INDEX(ConvOp)
-INSTANTIATE_WEIGHTED_POOLING_INPUT_INDEX(PoolingMaxOp)
-INSTANTIATE_WEIGHTED_POOLING_INPUT_INDEX(PoolingMinOp)
-INSTANTIATE_WEIGHTED_POOLING_INPUT_INDEX(PoolingSumOp)
 
 SmallVector<AffineExpr, 4> mlir::linalg::concat(ArrayRef<AffineExpr> a,
                                                 ArrayRef<AffineExpr> b) {
@@ -3215,9 +3163,6 @@ struct SimplifyDepthwiseConvQOp
   }
 
 LINALGOP_FOLDERS(ConvOp)
-LINALGOP_FOLDERS(PoolingMaxOp)
-LINALGOP_FOLDERS(PoolingMinOp)
-LINALGOP_FOLDERS(PoolingSumOp)
 LINALGOP_FOLDERS(CopyOp)
 LINALGOP_FOLDERS(FillOp)
 LINALGOP_FOLDERS(GenericOp)

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 42105a9534719..854166d0ef679 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -209,53 +209,12 @@ Value getPaddedInput(OpBuilder &b, Location loc, Value input,
 namespace {
 
 /// The padding value for a given Op depends on the semantics of the Op.
-/// The identity value for ConvOp and PoolingSumOp is 0, for PoolingMaxOp is
-/// -inf or minInt and for PoolingMinOp is inf or maxInt.
+/// The identity value for ConvOp is 0.
 template <typename OpType> Attribute getPadValueAttr(Type type) {
   llvm_unreachable("Unexpected op type for getPadValueAttr");
   return {};
 }
 
-template <> Attribute getPadValueAttr<PoolingMaxOp>(Type type) {
-  if (auto floatType = type.dyn_cast<FloatType>()) {
-    return OpBuilder(type.getContext())
-        .getFloatAttr(floatType, APFloat::getInf(floatType.getFloatSemantics(),
-                                                 /*Negative*/ true));
-  }
-  if (auto intType = type.dyn_cast<IntegerType>()) {
-    unsigned width = intType.getWidth();
-    // The select instruction used to lower the PoolingMax uses a signed
-    // comparison, use a signed constant irrespective of the signedness of the
-    // integer type.
-    return OpBuilder(type.getContext())
-        .getIntegerAttr(intType, APInt::getSignedMinValue(width));
-  }
-  llvm_unreachable("Unsupported data type for PoolingMaxOp");
-  return {};
-}
-
-template <> Attribute getPadValueAttr<PoolingMinOp>(Type type) {
-  if (auto floatType = type.dyn_cast<FloatType>()) {
-    return OpBuilder(type.getContext())
-        .getFloatAttr(floatType,
-                      APFloat::getInf(floatType.getFloatSemantics()));
-  }
-  if (auto intType = type.dyn_cast<IntegerType>()) {
-    unsigned width = intType.getWidth();
-    // The select instruction used to lower the PoolingMin uses a signed
-    // comparison, use a signed constant irrespective of the signedness of the
-    // integer type.
-    return OpBuilder(type.getContext())
-        .getIntegerAttr(intType, APInt::getSignedMaxValue(width));
-  }
-  llvm_unreachable("Unsupported data type for PoolingMinOp");
-  return {};
-}
-
-template <> Attribute getPadValueAttr<PoolingSumOp>(Type type) {
-  return OpBuilder(type.getContext()).getZeroAttr(type);
-}
-
 template <> Attribute getPadValueAttr<ConvOp>(Type type) {
   return OpBuilder(type.getContext()).getZeroAttr(type);
 }
@@ -311,72 +270,6 @@ static void emitScalarImplementation(OpBuilder &b, Location loc,
   }
 }
 
-template <typename PoolingOp> static bool hasPadding(PoolingOp poolingOp) {
-  for (unsigned i = 0, e = poolingOp.getNumWindowLoops(); i < e; ++i) {
-    if (poolingOp.getLowPad(i) > 0 || poolingOp.getHighPad(i) > 0)
-      return true;
-  }
-  return false;
-}
-
-template <typename LoadOpTy, typename StoreOpTy, typename PoolingOp>
-static Value getPoolingInput(OpBuilder &b, Location loc, PoolingOp op,
-                             ArrayRef<Value> inputIndices) {
-  if (hasPadding(op)) {
-    Type type =
-        op.input().getType().template cast<MemRefType>().getElementType();
-    Value padValue =
-        b.create<ConstantOp>(loc, type, getPadValueAttr<PoolingOp>(type));
-    return getPaddedInput(b, loc, op.input(), inputIndices,
-                          /*Pad every dimension*/ {}, padValue);
-  }
-  return b.create<LoadOpTy>(loc, op.input(), inputIndices);
-}
-
-template <typename LoadOpTy, typename StoreOpTy, typename OpType>
-void emitPoolingMinMaxScalarImplementation(OpBuilder &b, Location loc,
-                                           ArrayRef<Value> allIvs, OpType op) {
-  InputAndOutputIndices indices = getInputAndOutputIndices(b, loc, allIvs, op);
-  Value lhs = b.create<LoadOpTy>(loc, op.output(), indices.outputs);
-  Value rhs = getPoolingInput<LoadOpTy, StoreOpTy>(b, loc, op, indices.inputs);
-  Value value = llvm::TypeSwitch<Operation *, Value>(op)
-                    .Case([&](PoolingMinOp poolingOp) {
-                      return ArithBuilder(b, loc).select(
-                          ArithBuilder(b, loc).slt(lhs, rhs), lhs, rhs);
-                    })
-                    .Case([&](PoolingMaxOp poolingOp) {
-                      return ArithBuilder(b, loc).select(
-                          ArithBuilder(b, loc).sgt(lhs, rhs), lhs, rhs);
-                    })
-                    .Default([&](auto) { return Value(); });
-  b.create<StoreOpTy>(loc, value, op.output(), indices.outputs);
-}
-
-template <typename LoadOpTy, typename StoreOpTy>
-static void emitScalarImplementation(OpBuilder &b, Location loc,
-                                     ArrayRef<Value> allIvs, PoolingMaxOp op) {
-  emitPoolingMinMaxScalarImplementation<LoadOpTy, StoreOpTy, PoolingMaxOp>(
-      b, loc, allIvs, op);
-}
-
-template <typename LoadOpTy, typename StoreOpTy>
-static void emitScalarImplementation(OpBuilder &b, Location loc,
-                                     ArrayRef<Value> allIvs, PoolingMinOp op) {
-  emitPoolingMinMaxScalarImplementation<LoadOpTy, StoreOpTy, PoolingMinOp>(
-      b, loc, allIvs, op);
-}
-
-template <typename LoadOpTy, typename StoreOpTy>
-static void emitScalarImplementation(OpBuilder &b, Location loc,
-                                     ArrayRef<Value> allIvs, PoolingSumOp op) {
-  auto indices = getInputAndOutputIndices(b, loc, allIvs, op);
-  Value inputVal =
-      getPoolingInput<LoadOpTy, StoreOpTy>(b, loc, op, indices.inputs);
-  Value outputVal = b.create<LoadOpTy>(loc, op.output(), indices.outputs);
-  Value added = ArithBuilder(b, loc).add(outputVal, inputVal);
-  b.create<StoreOpTy>(loc, added, op.output(), indices.outputs);
-}
-
 /// Replace the index operations in the body of the loop nest by the matching
 /// induction variables.
 static void replaceIndexOpsByInductionVariables(LinalgOp linalgOp,
@@ -436,11 +329,9 @@ static Optional<LinalgLoops> linalgOpToLoopsImpl(PatternRewriter &rewriter,
                "expect operands are captured and not passed by loop argument");
         allIvs.append(ivs.begin(), ivs.end());
         llvm::TypeSwitch<Operation *>(linalgOp)
-            .Case<ConvOp, PoolingMaxOp, PoolingMinOp, PoolingSumOp, LinalgOp>(
-                [&](auto op) {
-                  emitScalarImplementation<LoadOpTy, StoreOpTy>(b, loc, allIvs,
-                                                                op);
-                })
+            .Case<ConvOp, LinalgOp>([&](auto op) {
+              emitScalarImplementation<LoadOpTy, StoreOpTy>(b, loc, allIvs, op);
+            })
             .Default([&](Operation *op) { assert(false && "unexpected op"); });
         return scf::ValueVector{};
       });
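
Note on the deleted getPadValueAttr specializations: they supplied the reduction identity (-inf or the signed minimum for max pooling, +inf or the signed maximum for min pooling, 0 for sum pooling) when emitting padded loads. With this specialized lowering gone, one way to obtain the same behavior with the OpDSL ops is to pre-initialize the output with the identity before running the pooling. A tiny illustrative sketch, assuming the linalg.fill form in use at the time (not code from this commit):

    // Identity for f32 max pooling: -inf, matching the removed
    // getPadValueAttr<PoolingMaxOp> specialization.
    %neg_inf = constant 0xFF800000 : f32
    linalg.fill(%neg_inf, %output) : f32, memref<?x?xf32>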

diff --git a/mlir/test/Dialect/Linalg/affine.mlir b/mlir/test/Dialect/Linalg/affine.mlir
index 59c72f657e1d5..74dce8a006d13 100644
--- a/mlir/test/Dialect/Linalg/affine.mlir
+++ b/mlir/test/Dialect/Linalg/affine.mlir
@@ -126,26 +126,3 @@ func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memre
 //       CHECK:       %[[res:.*]] = addf %[[vc]], %[[inc]] : f32
 //       CHECK:       affine.store %[[res]], %[[mC]][%[[b]], %[[m]], %[[n]]] : memref<?x?x?xf32>
 
-// CHECK-LABEL: func @pooling_max_min
-func @pooling_max_min(%arg0: memref<?x?xf32>,
-                      %arg1: memref<?x?xi32>,
-                      %arg2: memref<?x?xf32>) {
-  linalg.pooling_max(%arg0, %arg1, %arg2) { strides = [2, 1] }:
-    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
-  linalg.pooling_min(%arg0, %arg1, %arg2) { strides = [2, 1] }:
-    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
-  return
-}
-// This is a basic check to make sure the right load/stores are used. loops.mlir
-// checks for the rest.
-// CHECK:      affine.load
-// CHECK-NEXT: affine.load
-// CHECK-NEXT: cmpf
-// CHECK-NEXT: select
-// CHECK-NEXT: affine.store
-// The min pooling body.
-// CHECK:      affine.load
-// CHECK-NEXT: affine.load
-// CHECK-NEXT: cmpf
-// CHECK-NEXT: select
-// CHECK-NEXT: affine.store

diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index 525e4e05e5369..be378589792d6 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -308,17 +308,6 @@ func @generic(%arg0: memref<?x?xi4>) {
 
 // -----
 
-func @pooling_rank_mismatch(%arg0: memref<?x?x?xf32>,
-                            %arg1: memref<2x3xf32>,
-                            %arg2: memref<?x?x?xf32>) {
-  // expected-error @+1 {{expected operand rank (2) to match the result rank of indexing_map #1 (3)}}
-  linalg.pooling_max(%arg0, %arg1, %arg2) {strides = [2, 1, 2]}:
-    memref<?x?x?xf32>, memref<2x3xf32>, memref<?x?x?xf32>
-  return
-}
-
-// -----
-
 func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?xf32>, %c3: memref<?x?x?xf32>) {
   // expected-error @+1 {{expected operand rank (2) to match the result rank of indexing_map #1 (3)}}
   linalg.batch_matmul ins(%a3, %b3: memref<?x?x?xf32>, memref<?x?xf32>)

diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir
index 747471623c248..4ba52527b27a3 100644
--- a/mlir/test/Dialect/Linalg/loops.mlir
+++ b/mlir/test/Dialect/Linalg/loops.mlir
@@ -14,8 +14,6 @@
 // CHECK-DAG: #[[$stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
 // CHECK-DAG: #[[$stride2Dilation4:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1 * 4)>
 // CHECK-DAG: #[[$stride3Dilation5:.*]] = affine_map<(d0, d1) -> (d0 * 3 + d1 * 5)>
-// CHECK-DAG: #[[$stride1Dilation1Padding1:.*]] = affine_map<(d0, d1) -> (d0 + d1 - 1)>
-// CHECK-DAG: #[[$stride1Dilation1Padding2:.*]] = affine_map<(d0, d1) -> (d0 + d1 - 2)>
 
 // CHECKPARALLEL-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 // CHECKPARALLEL-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
@@ -27,8 +25,6 @@
 // CHECKPARALLEL-DAG: #[[$stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
 // CHECKPARALLEL-DAG: #[[$stride2Dilation4:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1 * 4)>
 // CHECKPARALLEL-DAG: #[[$stride3Dilation5:.*]] = affine_map<(d0, d1) -> (d0 * 3 + d1 * 5)>
-// CHECKPARALLEL-DAG: #[[$stride1Dilation1Padding1:.*]] = affine_map<(d0, d1) -> (d0 + d1 - 1)>
-// CHECKPARALLEL-DAG: #[[$stride1Dilation1Padding2:.*]] = affine_map<(d0, d1) -> (d0 + d1 - 2)>
 
 func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
   %c0 = constant 0 : index
@@ -426,404 +422,6 @@ func @conv_padding(%arg0: memref<?x?x?x?xf32>,
 //       CHECKPARALLEL:           addf
 //       CHECKPARALLEL:           store %{{.*}}, {{.*}} : memref<?x?x?x?xf32>
 
-func @pooling_max(%arg0: memref<?x?xf32>,
-                  %arg1: memref<?x?xi32>,
-                  %arg2: memref<?x?xf32>) {
-  linalg.pooling_max(%arg0, %arg1, %arg2) { strides = [2, 1] }:
-    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
-  return
-}
-// CHECK-LABEL: func @pooling_max
-//       CHECK:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECK:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECK:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECK:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECK:   scf.for {{.*}} to %[[OX]]
-//       CHECK:     scf.for {{.*}} to %[[OY]]
-//       CHECK:       scf.for {{.*}} to %[[WX]]
-//       CHECK:         scf.for {{.*}} to %[[WY]]
-//       CHECK:           %[[IX:.*]] = affine.apply #[[$stride2Dilation1]]
-//       CHECK:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1]]
-//       CHECK:           memref.load {{.*}} : memref<?x?xf32>
-//       CHECK:           memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
-//       CHECK:           %[[RES:.*]] = select %{{.*}},
-//       CHECK:           store %[[RES]], {{.*}} : memref<?x?xf32>
-
-// CHECKPARALLEL-LABEL: func @pooling_max
-//       CHECKPARALLEL:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECKPARALLEL:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECKPARALLEL:   scf.parallel {{.*}} to (%[[OX]], %[[OY]])
-//       CHECKPARALLEL:     scf.for {{.*}} to %[[WX]]
-//       CHECKPARALLEL:       scf.for {{.*}} to %[[WY]]
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride2Dilation1]]
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1]]
-//       CHECKPARALLEL:         memref.load {{.*}} : memref<?x?xf32>
-//       CHECKPARALLEL:         memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[RES:.*]] = select %{{.*}},
-//       CHECKPARALLEL:         store %[[RES]], {{.*}} : memref<?x?xf32>
-
-func @pooling_max_padding(%arg0: memref<?x?xf32>,
-                          %arg1: memref<?x?xi32>,
-                          %arg2: memref<?x?xf32>) {
-  linalg.pooling_max(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } :
-    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
-  return
-}
-// CHECK-LABEL: func @pooling_max_padding
-//       CHECK:   %[[PAD:.*]] = constant 0xFF800000 : f32
-//       CHECK:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECK:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECK:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECK:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECK:   scf.for {{.*}} to %[[OX]]
-//       CHECK:     scf.for {{.*}} to %[[OY]]
-//       CHECK:       scf.for {{.*}} to %[[WX]]
-//       CHECK:         scf.for {{.*}} to %[[WY]]
-//       CHECK:           %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECK:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECK:           %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xf32>
-//       CHECK:           %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECK:           %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECK:           %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
-//       CHECK:           %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-//       CHECK:           %[[CMP:.*]] = cmpf ogt, %[[RHS]], %[[SEL]] : f32
-//       CHECK:           %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32
-//       CHECK:           store %[[RES]], {{.*}} : memref<?x?xf32>
-
-// CHECKPARALLEL-LABEL: func @pooling_max_padding
-//       CHECKPARALLEL:   %[[PAD:.*]] = constant 0xFF800000 : f32
-//       CHECKPARALLEL:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECKPARALLEL:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECKPARALLEL:   scf.parallel {{.*}} to (%[[OX]], %[[OY]])
-//       CHECKPARALLEL:     scf.for {{.*}} to %[[WX]]
-//       CHECKPARALLEL:       scf.for {{.*}} to %[[WY]]
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECKPARALLEL:         %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECKPARALLEL:         %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECKPARALLEL:         %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-//       CHECKPARALLEL:         %[[CMP:.*]] = cmpf ogt, %[[RHS]], %[[SEL]] : f32
-//       CHECKPARALLEL:         %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32
-//       CHECKPARALLEL:         store %[[RES]], {{.*}} : memref<?x?xf32>
-
-func @pooling_max_padding_i32(%arg0: memref<?x?xi32>,
-                              %arg1: memref<?x?xi32>,
-                              %arg2: memref<?x?xi32>) {
-  linalg.pooling_max(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } :
-    memref<?x?xi32>, memref<?x?xi32>, memref<?x?xi32>
-  return
-}
-// CHECK-LABEL: func @pooling_max_padding_i32
-//       CHECK:   %[[PAD:.*]] = constant -2147483648 : i32
-//       CHECK:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECK:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECK:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xi32>
-//       CHECK:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xi32>
-//       CHECK:   scf.for {{.*}} to %[[OX]]
-//       CHECK:     scf.for {{.*}} to %[[OY]]
-//       CHECK:       scf.for {{.*}} to %[[WX]]
-//       CHECK:         scf.for {{.*}} to %[[WY]]
-//       CHECK:           %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECK:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECK:           %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xi32>
-//       CHECK:           %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECK:           %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECK:           %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
-//       CHECK:           %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-//       CHECK:           %[[CMP:.*]] = cmpi sgt, %[[RHS]], %[[SEL]] : i32
-//       CHECK:           %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32
-//       CHECK:           store %[[RES]], {{.*}} : memref<?x?xi32>
-
-// CHECKPARALLEL-LABEL: func @pooling_max_padding_i32
-//       CHECKPARALLEL:   %[[PAD:.*]] = constant -2147483648 : i32
-//       CHECKPARALLEL:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   scf.parallel {{.*}} to (%[[OX]], %[[OY]])
-//       CHECKPARALLEL:     scf.for {{.*}} to %[[WX]]
-//       CHECKPARALLEL:       scf.for {{.*}} to %[[WY]]
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECKPARALLEL:         %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xi32>
-//       CHECKPARALLEL:         %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECKPARALLEL:         %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECKPARALLEL:         %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
-//       CHECKPARALLEL:         %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-//       CHECKPARALLEL:         %[[CMP:.*]] = cmpi sgt, %[[RHS]], %[[SEL]] : i32
-//       CHECKPARALLEL:         %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32
-//       CHECKPARALLEL:         store %[[RES]], {{.*}} : memref<?x?xi32>
-
-func @pooling_min(%arg0: memref<?x?xf32>,
-                  %arg1: memref<?x?xi32>,
-                  %arg2: memref<?x?xf32>) {
-  linalg.pooling_min(%arg0, %arg1, %arg2) { strides = [2, 1] }:
-    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
-  return
-}
-// CHECK-LABEL: func @pooling_min
-//       CHECK:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECK:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECK:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECK:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECK:   scf.for {{.*}} to %[[OX]]
-//       CHECK:     scf.for {{.*}} to %[[OY]]
-//       CHECK:       scf.for {{.*}} to %[[WX]]
-//       CHECK:         scf.for {{.*}} to %[[WY]]
-//       CHECK:           %[[IX:.*]] = affine.apply #[[$stride2Dilation1]]
-//       CHECK:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1]]
-//       CHECK:           memref.load {{.*}} : memref<?x?xf32>
-//       CHECK:           memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
-//       CHECK:           %[[RES:.*]] = select %{{.*}},
-//       CHECK:           store %[[RES]], {{.*}} : memref<?x?xf32>
-
-// CHECKPARALLEL-LABEL: func @pooling_min
-//       CHECKPARALLEL:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECKPARALLEL:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECKPARALLEL:   scf.parallel {{.*}} to (%[[OX]], %[[OY]])
-//       CHECKPARALLEL:     scf.for {{.*}} to %[[WX]]
-//       CHECKPARALLEL:       scf.for {{.*}} to %[[WY]]
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride2Dilation1]]
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1]]
-//       CHECKPARALLEL:         memref.load {{.*}} : memref<?x?xf32>
-//       CHECKPARALLEL:         memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[RES:.*]] = select %{{.*}},
-//       CHECKPARALLEL:         store %[[RES]], {{.*}} : memref<?x?xf32>
-
-func @pooling_min_padding(%arg0: memref<?x?xf32>,
-                          %arg1: memref<?x?xi32>,
-                          %arg2: memref<?x?xf32>) {
-  linalg.pooling_min(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } :
-    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
-  return
-}
-// CHECK-LABEL: func @pooling_min_padding
-//       CHECK:   %[[PAD:.*]] = constant 0x7F800000 : f32
-//       CHECK:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECK:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECK:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECK:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECK:   scf.for {{.*}} to %[[OX]]
-//       CHECK:     scf.for {{.*}} to %[[OY]]
-//       CHECK:       scf.for {{.*}} to %[[WX]]
-//       CHECK:         scf.for {{.*}} to %[[WY]]
-//       CHECK:           %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECK:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECK:           %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xf32>
-//       CHECK:           %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECK:           %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECK:           %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
-//       CHECK:           %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-//       CHECK:           %[[CMP:.*]] = cmpf olt, %[[RHS]], %[[SEL]] : f32
-//       CHECK:           %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32
-//       CHECK:           store %[[RES]], {{.*}} : memref<?x?xf32>
-
-// CHECKPARALLEL-LABEL: func @pooling_min_padding
-//       CHECKPARALLEL:   %[[PAD:.*]] = constant 0x7F800000 : f32
-//       CHECKPARALLEL:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECKPARALLEL:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECKPARALLEL:   scf.parallel {{.*}} to (%[[OX]], %[[OY]])
-//       CHECKPARALLEL:     scf.for {{.*}} to %[[WX]]
-//       CHECKPARALLEL:       scf.for {{.*}} to %[[WY]]
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECKPARALLEL:         %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECKPARALLEL:         %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECKPARALLEL:         %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-//       CHECKPARALLEL:         %[[CMP:.*]] = cmpf olt, %[[RHS]], %[[SEL]] : f32
-//       CHECKPARALLEL:         %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32
-//       CHECKPARALLEL:         store %[[RES]], {{.*}} : memref<?x?xf32>
-
-func @pooling_min_padding_i32(%arg0: memref<?x?xi32>,
-                              %arg1: memref<?x?xi32>,
-                              %arg2: memref<?x?xi32>) {
-  linalg.pooling_min(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } :
-    memref<?x?xi32>, memref<?x?xi32>, memref<?x?xi32>
-  return
-}
-// CHECK-LABEL: func @pooling_min_padding_i32
-//       CHECK:   %[[PAD:.*]] = constant 2147483647 : i32
-//       CHECK:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECK:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECK:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xi32>
-//       CHECK:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xi32>
-//       CHECK:   scf.for {{.*}} to %[[OX]]
-//       CHECK:     scf.for {{.*}} to %[[OY]]
-//       CHECK:       scf.for {{.*}} to %[[WX]]
-//       CHECK:         scf.for {{.*}} to %[[WY]]
-//       CHECK:           %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECK:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECK:           %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xi32>
-//       CHECK:           %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECK:           %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECK:           %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
-//       CHECK:           %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-//       CHECK:           %[[CMP:.*]] = cmpi slt, %[[RHS]], %[[SEL]] : i32
-//       CHECK:           %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32
-//       CHECK:           store %[[RES]], {{.*}} : memref<?x?xi32>
-
-// CHECKPARALLEL-LABEL: func @pooling_min_padding_i32
-//       CHECKPARALLEL:   %[[PAD:.*]] = constant 2147483647 : i32
-//       CHECKPARALLEL:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   scf.parallel {{.*}} to (%[[OX]], %[[OY]])
-//       CHECKPARALLEL:     scf.for {{.*}} to %[[WX]]
-//       CHECKPARALLEL:       scf.for {{.*}} to %[[WY]]
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECKPARALLEL:         %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xi32>
-//       CHECKPARALLEL:         %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECKPARALLEL:         %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECKPARALLEL:         %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
-//       CHECKPARALLEL:         %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-//       CHECKPARALLEL:         %[[CMP:.*]] = cmpi slt, %[[RHS]], %[[SEL]] : i32
-//       CHECKPARALLEL:         %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32
-//       CHECKPARALLEL:         store %[[RES]], {{.*}} : memref<?x?xi32>
-
-func @pooling_sum(%arg0: memref<?x?xf32>,
-                  %arg1: memref<?x?xi32>,
-                  %arg2: memref<?x?xf32>) {
-  linalg.pooling_sum(%arg0, %arg1, %arg2) { strides = [2, 1] }:
-    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
-  return
-}
-// CHECK-LABEL: func @pooling_sum
-//       CHECK:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECK:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECK:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECK:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECK:   scf.for {{.*}} to %[[OX]]
-//       CHECK:     scf.for {{.*}} to %[[OY]]
-//       CHECK:       scf.for {{.*}} to %[[WX]]
-//       CHECK:         scf.for {{.*}} to %[[WY]]
-//       CHECK:           %[[IX:.*]] = affine.apply #[[$stride2Dilation1]]
-//       CHECK:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1]]
-//       CHECK:           %[[RHS:.*]] = memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
-//       CHECK:           %[[LHS:.*]] = memref.load {{.*}} : memref<?x?xf32>
-//       CHECK:           %[[RES:.*]] = addf %[[LHS]], %[[RHS]] : f32
-//       CHECK:           store %[[RES]], {{.*}} : memref<?x?xf32>
-
-// CHECKPARALLEL-LABEL: func @pooling_sum
-//       CHECKPARALLEL:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECKPARALLEL:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECKPARALLEL:   scf.parallel {{.*}} to (%[[OX]], %[[OY]])
-//       CHECKPARALLEL:     scf.for {{.*}} to %[[WX]]
-//       CHECKPARALLEL:       scf.for {{.*}} to %[[WY]]
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride2Dilation1]]
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1]]
-//       CHECKPARALLEL:         %[[RHS:.*]] = memref.load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[LHS:.*]] = memref.load {{.*}} : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[RES:.*]] = addf %[[LHS]], %[[RHS]] : f32
-//       CHECKPARALLEL:         store %[[RES]], {{.*}} : memref<?x?xf32>
-
-func @pooling_sum_padding(%arg0: memref<?x?xf32>,
-                          %arg1: memref<?x?xi32>,
-                          %arg2: memref<?x?xf32>) {
-  linalg.pooling_sum(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } :
-    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
-  return
-}
-// CHECK-LABEL: func @pooling_sum_padding
-//       CHECK:   %[[PAD:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECK:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECK:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECK:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECK:   scf.for {{.*}} to %[[OX]]
-//       CHECK:     scf.for {{.*}} to %[[OY]]
-//       CHECK:       scf.for {{.*}} to %[[WX]]
-//       CHECK:         scf.for {{.*}} to %[[WY]]
-//       CHECK:           %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECK:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECK:           %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECK:           %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECK:           %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
-//       CHECK:           %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-//       CHECK:           %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xf32>
-//       CHECK:           %[[RES:.*]] = addf %[[RHS]], %[[SEL]] : f32
-//       CHECK:           store %[[RES]], {{.*}} : memref<?x?xf32>
-
-// CHECKPARALLEL-LABEL: func @pooling_sum_padding
-//       CHECKPARALLEL:   %[[PAD:.*]] = constant 0.000000e+00 : f32
-//       CHECKPARALLEL:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xf32>
-//       CHECKPARALLEL:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xf32>
-//       CHECKPARALLEL:   scf.parallel {{.*}} to (%[[OX]], %[[OY]])
-//       CHECKPARALLEL:     scf.for {{.*}} to %[[WX]]
-//       CHECKPARALLEL:       scf.for {{.*}} to %[[WY]]
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECKPARALLEL:         %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECKPARALLEL:         %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECKPARALLEL:         %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-//       CHECKPARALLEL:         %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xf32>
-//       CHECKPARALLEL:         %[[RES:.*]] = addf %[[RHS]], %[[SEL]] : f32
-//       CHECKPARALLEL:         store %[[RES]], {{.*}} : memref<?x?xf32>
-
-func @pooling_sum_padding_i32(%arg0: memref<?x?xi32>,
-                              %arg1: memref<?x?xi32>,
-                              %arg2: memref<?x?xi32>) {
-  linalg.pooling_sum(%arg0, %arg1, %arg2) { padding = dense<[[2, 2], [1, 1]]> : tensor<2x2xi64> } :
-    memref<?x?xi32>, memref<?x?xi32>, memref<?x?xi32>
-  return
-}
-// CHECK-LABEL: func @pooling_sum_padding_i32
-//       CHECK:   %[[PAD:.*]] = constant 0 : i32
-//       CHECK:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECK:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECK:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xi32>
-//       CHECK:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xi32>
-//       CHECK:   scf.for {{.*}} to %[[OX]]
-//       CHECK:     scf.for {{.*}} to %[[OY]]
-//       CHECK:       scf.for {{.*}} to %[[WX]]
-//       CHECK:         scf.for {{.*}} to %[[WY]]
-//       CHECK:           %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECK:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECK:           %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECK:           %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECK:           %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
-//       CHECK:           %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-//       CHECK:           %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xi32>
-//       CHECK:           %[[RES:.*]] = addi %[[RHS]], %[[SEL]] : i32
-//       CHECK:           store %[[RES]], {{.*}} : memref<?x?xi32>
-
-// CHECKPARALLEL-LABEL: func @pooling_sum_padding_i32
-//       CHECKPARALLEL:   %[[PAD:.*]] = constant 0 : i32
-//       CHECKPARALLEL:   %[[WX:.*]] = memref.dim %arg1, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = memref.dim %arg1, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = memref.dim %arg2, %c0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OY:.*]] = memref.dim %arg2, %c1 : memref<?x?xi32>
-//       CHECKPARALLEL:   scf.parallel {{.*}} to (%[[OX]], %[[OY]])
-//       CHECKPARALLEL:     scf.for {{.*}} to %[[WX]]
-//       CHECKPARALLEL:       scf.for {{.*}} to %[[WY]]
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride1Dilation1Padding2]]
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1Padding1]]
-//       CHECKPARALLEL:         %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[IX]])
-//       CHECKPARALLEL:         %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
-//       CHECKPARALLEL:         %[[LHS:.*]] = memref.load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
-//       CHECKPARALLEL:         %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-//       CHECKPARALLEL:         %[[RHS:.*]] = memref.load {{.*}} : memref<?x?xi32>
-//       CHECKPARALLEL:         %[[RES:.*]] = addi %[[RHS]], %[[SEL]] : i32
-//       CHECKPARALLEL:         store %[[RES]], {{.*}} : memref<?x?xi32>
-
 #accesses = [
   affine_map<(i, j, k) -> (i, j)>,
   affine_map<(i, j, k) -> (i, j, k)>,

diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index 23e29e0ab082a..ad6935dc7b8ba 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -269,48 +269,6 @@ func @conv_padding(%arg0: memref<?x?x?x?xf32>,
 
 // -----
 
-func @pooling_max(%arg0: memref<?x?x?xf32>,
-                  %arg1: memref<?x?x?xi32>,
-                  %arg2: memref<?x?x?xf32>) {
-  linalg.pooling_max(%arg0, %arg1, %arg2) {strides = [2, 1, 2]}:
-    memref<?x?x?xf32>, memref<?x?x?xi32>, memref<?x?x?xf32>
-  return
-}
-// CHECK-LABEL: func @pooling_max
-//       CHECK:   linalg.pooling_max(%{{.*}}, %{{.*}}, %{{.*}})
-//  CHECK-SAME:   {strides = [2, 1, 2]}
-//  CHECK-SAME:   memref<?x?x?xf32>, memref<?x?x?xi32>, memref<?x?x?xf32>
-
-// -----
-
-func @pooling_min(%arg0: memref<?x?x?xf32>,
-                  %arg1: memref<?x?x?xi32>,
-                  %arg2: memref<?x?x?xf32>) {
-  linalg.pooling_min(%arg0, %arg1, %arg2) {strides = [2, 1, 2]}:
-    memref<?x?x?xf32>, memref<?x?x?xi32>, memref<?x?x?xf32>
-  return
-}
-// CHECK-LABEL: func @pooling_min
-//       CHECK:   linalg.pooling_min(%{{.*}}, %{{.*}}, %{{.*}})
-//  CHECK-SAME:   {strides = [2, 1, 2]}
-//  CHECK-SAME:   memref<?x?x?xf32>, memref<?x?x?xi32>, memref<?x?x?xf32>
-
-// -----
-
-func @pooling_sum(%arg0: memref<?x?x?xf32>,
-                  %arg1: memref<?x?x?xi32>,
-                  %arg2: memref<?x?x?xf32>) {
-  linalg.pooling_sum(%arg0, %arg1, %arg2) {strides = [2, 1, 2]}:
-    memref<?x?x?xf32>, memref<?x?x?xi32>, memref<?x?x?xf32>
-  return
-}
-// CHECK-LABEL: func @pooling_sum
-//       CHECK:   linalg.pooling_sum(%{{.*}}, %{{.*}}, %{{.*}})
-//  CHECK-SAME:   {strides = [2, 1, 2]}
-//  CHECK-SAME:   memref<?x?x?xf32>, memref<?x?x?xi32>, memref<?x?x?xf32>
-
-// -----
-
 #accesses_0 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> ()>,


        

