[Mlir-commits] [mlir] db7a2e9 - [mlir][linalg] Only compose PadTensorOps if no ExtractSliceOp is rank-reducing.

llvmlistbot at llvm.org
Mon Dec 13 05:08:02 PST 2021


Author: gysit
Date: 2021-12-13T13:01:30Z
New Revision: db7a2e9176e8e9d81bf896bbd53f4b07a7af0c14

URL: https://github.com/llvm/llvm-project/commit/db7a2e9176e8e9d81bf896bbd53f4b07a7af0c14
DIFF: https://github.com/llvm/llvm-project/commit/db7a2e9176e8e9d81bf896bbd53f4b07a7af0c14.diff

LOG: [mlir][linalg] Only compose PadTensorOps if no ExtractSliceOp is rank-reducing.

Do not compose pad tensor operations if the extract slice of the outer pad tensor operation is rank-reducing. The inner extract slice op cannot be rank-reducing since its source type must match the desired type of the padding.
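To make the condition concrete, here is a sketch mirroring the new test case below (names taken from that test): the slice feeding the existing pad is rank-reducing and has three slice sizes, while the later slice of the padded result has only two, so the sizes cannot be zipped and compared pairwise and the composition is skipped.

    %0 = tensor.extract_slice %arg0[0, 0, 0] [%size, %size, 1] [1, 1, 1]
        : tensor<64x64x1xf32> to tensor<?x?xf32>   // rank-reducing: three sizes
    // ... pad %0, fill, then slice the padded result ...
    %3 = tensor.extract_slice %2[0, 0] [%size, %size] [1, 1]
        : tensor<64x64xf32> to tensor<?x?xf32>     // only two sizes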

Depends On D115359

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D115428

Added: 
    

Modified: 
    mlir/lib/Dialect/Linalg/Utils/Utils.cpp
    mlir/test/Dialect/Linalg/pad.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index a737876eff5e..0524197886ba 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -357,12 +357,17 @@ Value makeComposedPadHighOp(OpBuilder &b, Location loc, RankedTensorType type,
       }))
     return PadTensorOp::createPadHighOp(type, source, pad, nofold, loc, b);
 
-  // Exit if the sizes of the dynamic sizes of `sliceOp` do not match the size
-  // of the slice padded by `padTensorOp`.
+  // Exit if `padTensorOpSliceOp`, which defines the slice used by
+  // `padTensorOp`, is rank-reducing.
   auto padTensorOpSliceOp =
       padTensorOp.source().getDefiningOp<tensor::ExtractSliceOp>();
-  if (!padTensorOpSliceOp ||
-      llvm::any_of(llvm::zip(sliceOp.getMixedSizes(),
+  if (!padTensorOpSliceOp || sliceOp.getMixedSizes().size() !=
+                                 padTensorOpSliceOp.getMixedSizes().size())
+    return PadTensorOp::createPadHighOp(type, source, pad, nofold, loc, b);
+
+  // Exit if the sizes of `sliceOp` do not match the sizes of the slice padded
+  // by `padTensorOp`.
+  if (llvm::any_of(llvm::zip(sliceOp.getMixedSizes(),
                              padTensorOpSliceOp.getMixedSizes()),
                    [](std::tuple<OpFoldResult, OpFoldResult> it) {
                      return !isEqualConstantIntOrValue(std::get<0>(it),

diff --git a/mlir/test/Dialect/Linalg/pad.mlir b/mlir/test/Dialect/Linalg/pad.mlir
index d67635c8b26a..31cb7c19d860 100644
--- a/mlir/test/Dialect/Linalg/pad.mlir
+++ b/mlir/test/Dialect/Linalg/pad.mlir
@@ -277,6 +277,31 @@ func @different_padding_dynamic_sizes(%arg0: tensor<64x64xf32>,
 
 #map0 = affine_map<()[s0] -> (64, s0)>
 
+//      MATMUL:  different_padding_dynamic_rank
+func @different_padding_dynamic_rank(%arg0: tensor<64x64x1xf32>,
+                                     %iv0 : index) -> tensor<?x?xf32> {
+  %cst = arith.constant 0.0 : f32
+  %size = affine.min #map0()[%iv0]
+  %0 = tensor.extract_slice %arg0[0, 0, 0] [%size, %size, 1] [1, 1, 1] : tensor<64x64x1xf32> to tensor<?x?xf32>
+  %1 = linalg.pad_tensor %0 low[0, 0] high[%iv0, %iv0]  {
+    ^bb0(%arg3: index, %arg4: index):  // no predecessors
+      linalg.yield %cst : f32
+  } : tensor<?x?xf32> to tensor<64x64xf32>
+  %2 = linalg.fill(%cst, %1) : f32, tensor<64x64xf32> -> tensor<64x64xf32>
+  %3 = tensor.extract_slice %2[0, 0] [%size, %size] [1, 1] : tensor<64x64xf32> to tensor<?x?xf32>
+
+  // Different dynamic ranks prevent composing the paddings ([%size, %size, 1] vs [%size, %size]).
+  //      MATMUL:  = linalg.fill
+  //      MATMUL:  = linalg.pad_tensor
+  //      MATMUL:  = linalg.matmul
+  %4 = linalg.matmul ins(%3, %3 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) -> tensor<?x?xf32>
+  return %4 : tensor<?x?xf32>
+}
+
+// -----
+
+#map0 = affine_map<()[s0] -> (64, s0)>
+
 //      MATMUL:  different_padding_static_sizes
 func @different_padding_static_sizes(%arg0: tensor<62x62xf32>,
                                      %iv0 : index) -> tensor<?x?xf32> {


        

