[Mlir-commits] [mlir] 7fe2063 - Handle the case of tile and pad a subset of the dimensions
Ahmed Taei
llvmlistbot at llvm.org
Tue Apr 27 17:41:44 PDT 2021
Author: Ahmed Taei
Date: 2021-04-27T17:41:22-07:00
New Revision: 7fe20634460a47242cb3c47c1b41a7032f62a0a7
URL: https://github.com/llvm/llvm-project/commit/7fe20634460a47242cb3c47c1b41a7032f62a0a7
DIFF: https://github.com/llvm/llvm-project/commit/7fe20634460a47242cb3c47c1b41a7032f62a0a7.diff
LOG: Handle the case of tile and pad a subset of the dimensions
This is useful in cases such as tile-distribute-and-pad, where not all
dims are tiled. When a loop dimension is left untiled, its size comes
directly from the operand; getSmallestBoundingIndex can now recover a
static bound for it from the operand's shaped type.
Differential Revision: https://reviews.llvm.org/D101319
Added:
Modified:
mlir/lib/Dialect/Linalg/Utils/Utils.cpp
mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index 2714856a2412..022a57343bc8 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -178,6 +178,16 @@ IntegerAttr getSmallestBoundingIndex(Value size) {
                          .getResult(0)
                          .dyn_cast<AffineConstantExpr>())
       boundingConst = cExpr.getValue();
+  } else if (auto dimOp = size.getDefiningOp<memref::DimOp>()) {
+    auto shape = dimOp.memrefOrTensor().getType().dyn_cast<ShapedType>();
+    if (auto constOp = dimOp.index().getDefiningOp<ConstantOp>()) {
+      if (auto indexAttr = constOp.value().dyn_cast<IntegerAttr>()) {
+        auto dimIndex = indexAttr.getInt();
+        if (!shape.isDynamicDim(dimIndex)) {
+          boundingConst = shape.getShape()[dimIndex];
+        }
+      }
+    }
   }
   if (boundingConst && *boundingConst >= 0)
     return Builder(size.getContext()).getIndexAttr(*boundingConst);
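The new branch above covers the untiled-dimension case: when `size` is produced
by a `memref.dim` (which at this revision accepts both memref and tensor
operands, hence `memrefOrTensor()`) with a constant index, and the indexed
dimension is static in the operand's shaped type, that static extent becomes
the bounding constant. A minimal illustration of the IR shape this resolves
(the value names are hypothetical, not taken from the patch):

  %c1 = constant 1 : index
  // Dimension 1 of the operand type is static, so getSmallestBoundingIndex
  // now returns the index attribute 8 for %size.
  %size = memref.dim %arg0, %c1 : tensor<?x8xi8>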
diff --git a/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
index de2aeee46038..63dc9fba7a85 100644
--- a/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
@@ -47,3 +47,36 @@ func @matmul_tensors(
// CHECK-1DIM-TILE: %[[TC:[0-9a-z]+]]: tensor<?x?xi32>) -> tensor<?x?xi32> {
// CHECK-1DIM-TILE-NOT: scf.for
// CHECK-1DIM-TILE: linalg.matmul_i8_i8_i32 ins(%[[TA]], %[[TB]] : tensor<?x?xi8>, tensor<?x?xi8>) outs(%[[TC]] : tensor<?x?xi32>) -> tensor<?x?xi32>
+
+func @matmul_partially_padded_tensors(
+  %arg0: tensor<?x8xi8>, %arg1: tensor<8x?xi8>, %arg2: tensor<?x?xi32>)
+    -> tensor<?x?xi32> {
+  %0 = linalg.matmul_i8_i8_i32 {__internal_linalg_transform__ = "tile-and-pad"}
+      ins(%arg0, %arg1: tensor<?x8xi8>, tensor<8x?xi8>)
+      outs(%arg2: tensor<?x?xi32>)
+      -> tensor<?x?xi32>
+  return %0 : tensor<?x?xi32>
+}
+// CHECK-LABEL: func @matmul_partially_padded_tensors(
+// CHECK: linalg.matmul_i8_i8_i32 ins({{.*}}, {{.*}} : tensor<2x4xi8>, tensor<4x3xi8>) outs({{.*}} : tensor<2x3xi32>) -> tensor<2x3xi32>
+
+
+// CHECK-1DIM-TILE: func @matmul_partially_padded_tensors(
+// CHECK-1DIM-TILE-SAME: %[[TA:[0-9a-z]+]]: tensor<?x8xi8>
+// CHECK-1DIM-TILE-SAME: %[[TB:[0-9a-z]+]]: tensor<8x?xi8>
+// CHECK-1DIM-TILE-SAME: %[[TC:[0-9a-z]+]]: tensor<?x?xi32>) -> tensor<?x?xi32> {
+// CHECK-1DIM-TILE: %[[TD0:.*]] = scf.for {{.*}} to {{.*}} step {{.*}} iter_args(%[[TC0:.*]] = %[[TC]]) -> (tensor<?x?xi32>) {
+// CHECK-1DIM-TILE: %[[TD1:.*]] = scf.for {{.*}} to {{.*}} step {{.*}} iter_args(%[[TC1:.*]] = %[[TC0]]) -> (tensor<?x?xi32>) {
+// CHECK-1DIM-TILE: %[[sTA:.*]] = subtensor %[[TA]][{{.*}}] : tensor<?x8xi8> to tensor<?x8xi8>
+// CHECK-1DIM-TILE: %[[sTAc:.*]] = tensor.cast %[[sTA]] : tensor<?x8xi8> to tensor<?x?xi8>
+// CHECK-1DIM-TILE: %[[sTB:.*]] = subtensor %[[TB]][{{.*}}] : tensor<8x?xi8> to tensor<8x?xi8>
+// CHECK-1DIM-TILE: %[[sTBc:.*]] = tensor.cast %[[sTB]] : tensor<8x?xi8> to tensor<?x?xi8>
+// CHECK-1DIM-TILE: %[[sTC:.*]] = subtensor %[[TC1]][{{.*}}] : tensor<?x?xi32> to tensor<?x?xi32>
+// CHECK-1DIM-TILE: %[[pA:.*]] = linalg.pad_tensor %[[sTAc]] low[%c0, %c0] high[%{{.*}}, %{{.*}}]
+// CHECK-1DIM-TILE: : tensor<?x?xi8> to tensor<2x8xi8>
+// CHECK-1DIM-TILE: %[[pB:.*]] = linalg.pad_tensor %[[sTBc]] low[%c0, %c0] high[%{{.*}}, %{{.*}}]
+// CHECK-1DIM-TILE: : tensor<?x?xi8> to tensor<8x3xi8>
+// CHECK-1DIM-TILE: %[[pC:.*]] = linalg.pad_tensor %[[sTC]] low[%c0, %c0] high[%{{.*}}, %{{.*}}]
+// CHECK-1DIM-TILE: : tensor<?x?xi32> to tensor<2x3xi32>
+// CHECK-1DIM-TILE: %[[pD:.*]] = linalg.matmul_i8_i8_i32 ins(%[[pA]], %[[pB]] : tensor<2x8xi8>, tensor<8x3xi8>)
+// CHECK-1DIM-TILE: outs(%[[pC]] : tensor<2x3xi32>) -> tensor<2x3xi32>
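For context, the test exercises this through a tile-and-pad pattern in which
only some loops get a tile size, so the untiled reduction dimension keeps the
static extent 8 from tensor<?x8xi8>/tensor<8x?xi8>. A rough sketch of how such
a pattern might be configured with the Linalg API of this era follows; it is
illustrative only, the padding-value callback `getNeutralElement` is a
hypothetical helper, and the exact option and pattern names should be treated
as assumptions rather than as the test pass's actual code:

  #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
  #include "mlir/IR/BuiltinOps.h"
  #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

  // Tile only a prefix of the loops; untiled dims are padded up to the static
  // bound recovered by getSmallestBoundingIndex.
  static void applyTileAndPad(FuncOp funcOp, ArrayRef<int64_t> tileSizes) {
    MLIRContext *ctx = funcOp.getContext();
    auto options = linalg::LinalgTilingOptions()
                       .setTileSizes(tileSizes) // e.g. {2, 3}: K stays untiled.
                       .setPaddingValueComputationFunction(getNeutralElement);
    RewritePatternSet patterns(ctx);
    patterns.insert<linalg::LinalgTilingPattern<linalg::MatmulI8I8I32Op>>(
        ctx, options,
        linalg::LinalgTransformationFilter(
            Identifier::get("tile-and-pad", ctx)));
    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
  }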