[Mlir-commits] [mlir] eaabd76 - Revert "[MLIR][Vector] Generalize DropUnitDimFromElementwiseOps to non leading / trailing dimensions." (#97652)

llvmlistbot at llvm.org
Wed Jul 3 16:03:47 PDT 2024


Author: Han-Chung Wang
Date: 2024-07-03T16:03:41-07:00
New Revision: eaabd762bd54dec75d8fee69f6aa1c26d1cbc68f

URL: https://github.com/llvm/llvm-project/commit/eaabd762bd54dec75d8fee69f6aa1c26d1cbc68f
DIFF: https://github.com/llvm/llvm-project/commit/eaabd762bd54dec75d8fee69f6aa1c26d1cbc68f.diff

LOG: Revert "[MLIR][Vector] Generalize DropUnitDimFromElementwiseOps to non leading / trailing dimensions." (#97652)

Reverts llvm/llvm-project#92934 because it breaks some lowerings. To
reproduce: `mlir-opt -test-vector-transfer-flatten-patterns ~/repro.mlir`

```mlir
func.func @unit_dim_folding(%arg0: vector<1x1xf32>) -> vector<1x1xf32> {
  %cst = arith.constant dense<0.000000e+00> : vector<1x1xf32>
  %0 = arith.mulf %arg0, %cst : vector<1x1xf32>
  return %0 : vector<1x1xf32>
}
```
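For context, an editorial sketch (not output included in the commit message): the reverted generalization dropped every non-scalable unit dim, so for the all-unit-dim `vector<1x1xf32>` operands above the rewrite would plausibly take roughly the following shape, collapsing the operands to a 0-D `vector<f32>`, which appears to be the case some downstream lowering could not handle.

```mlir
// Hypothetical result of the reverted pattern on the repro above (a sketch,
// not verified output): all non-scalable unit dims are dropped, so the
// vector<1x1xf32> operands collapse to a 0-D vector<f32>.
func.func @unit_dim_folding(%arg0: vector<1x1xf32>) -> vector<1x1xf32> {
  %cst = arith.constant dense<0.000000e+00> : vector<1x1xf32>
  %0 = vector.shape_cast %arg0 : vector<1x1xf32> to vector<f32>
  %1 = vector.shape_cast %cst : vector<1x1xf32> to vector<f32>
  %2 = arith.mulf %0, %1 : vector<f32>
  %3 = vector.shape_cast %2 : vector<f32> to vector<1x1xf32>
  return %3 : vector<1x1xf32>
}
```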

Added: 
    

Modified: 
    mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
    mlir/test/Dialect/Vector/vector-transfer-flatten.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index c7d3022eff4d3..da5954b70a2ec 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -1622,27 +1622,7 @@ struct ChainedReduction final : OpRewritePattern<vector::ReductionOp> {
   }
 };
 
-// Scalable unit dimensions are not supported. Folding such dimensions would
-// require "shifting" the scalable flag onto some other fixed-width dim (e.g.
-// vector<[1]x4xf32> -> vector<[4]xf32>). This could be implemented in the
-// future.
-static VectorType dropNonScalableUnitDimFromType(VectorType inVecTy) {
-  auto inVecShape = inVecTy.getShape();
-  SmallVector<int64_t> newShape;
-  SmallVector<bool> newScalableDims;
-  for (auto [dim, isScalable] :
-       llvm::zip_equal(inVecShape, inVecTy.getScalableDims())) {
-    if (dim == 1 && !isScalable)
-      continue;
-
-    newShape.push_back(dim);
-    newScalableDims.push_back(isScalable);
-  }
-
-  return VectorType::get(newShape, inVecTy.getElementType(), newScalableDims);
-}
-
-/// For vectors with at least an unit dim, replaces:
+/// For vectors with either leading or trailing unit dim, replaces:
 ///   elementwise(a, b)
 /// with:
 ///   sc_a = shape_cast(a)
@@ -1654,16 +1634,20 @@ static VectorType dropNonScalableUnitDimFromType(VectorType inVecTy) {
 /// required to be rank > 1.
 ///
 /// Ex:
+/// ```
 ///  %mul = arith.mulf %B_row, %A_row : vector<1x[4]xf32>
 ///  %cast = vector.shape_cast %mul : vector<1x[4]xf32> to vector<[4]xf32>
+/// ```
 ///
 /// gets converted to:
 ///
+/// ```
 ///  %B_row_sc = vector.shape_cast %B_row : vector<1x[4]xf32> to vector<[4]xf32>
 ///  %A_row_sc = vector.shape_cast %A_row : vector<1x[4]xf32> to vector<[4]xf32>
 ///  %mul = arith.mulf %B_row_sc, %A_row_sc : vector<[4]xf32>
 ///  %cast_new = vector.shape_cast %mul : vector<[4]xf32> to vector<1x[4]xf32>
 ///  %cast = vector.shape_cast %cast_new : vector<1x[4]xf32> to vector<[4]xf32>
+/// ```
 ///
 /// Patterns for folding shape_casts should instantly eliminate `%cast_new` and
 /// `%cast`.
@@ -1683,29 +1667,42 @@ struct DropUnitDimFromElementwiseOps final
     // guaranteed to have identical shapes (with some exceptions such as
     // `arith.select`) and it suffices to only check one of them.
     auto sourceVectorType = dyn_cast<VectorType>(op->getOperand(0).getType());
-    if (!sourceVectorType || sourceVectorType.getRank() < 2)
+    if (!sourceVectorType)
+      return failure();
+    if (sourceVectorType.getRank() < 2)
+      return failure();
+
+    bool hasTrailingDimUnitFixed =
+        ((sourceVectorType.getShape().back() == 1) &&
+         (!sourceVectorType.getScalableDims().back()));
+    bool hasLeadingDimUnitFixed =
+        ((sourceVectorType.getShape().front() == 1) &&
+         (!sourceVectorType.getScalableDims().front()));
+    if (!hasLeadingDimUnitFixed && !hasTrailingDimUnitFixed)
       return failure();
 
+    // Drop leading/trailing unit dim by applying vector.shape_cast to all
+    // operands
+    int64_t dim = hasLeadingDimUnitFixed ? 0 : sourceVectorType.getRank() - 1;
+
     SmallVector<Value> newOperands;
     auto loc = op->getLoc();
     for (auto operand : op->getOperands()) {
       auto opVectorType = cast<VectorType>(operand.getType());
-      auto newVType = dropNonScalableUnitDimFromType(opVectorType);
-      if (newVType == opVectorType)
-        return rewriter.notifyMatchFailure(op, "No unit dimension to remove.");
-
+      VectorType newVType = VectorType::Builder(opVectorType).dropDim(dim);
       auto opSC = rewriter.create<vector::ShapeCastOp>(loc, newVType, operand);
       newOperands.push_back(opSC);
     }
 
     VectorType newResultVectorType =
-        dropNonScalableUnitDimFromType(resultVectorType);
-    // Create an updated elementwise Op without unit dim.
+        VectorType::Builder(resultVectorType).dropDim(dim);
+    // Create an updated elementwise Op without leading/trailing unit dim
     Operation *elementwiseOp =
         rewriter.create(loc, op->getName().getIdentifier(), newOperands,
                         newResultVectorType, op->getAttrs());
 
-    // Restore the unit dim by applying vector.shape_cast to the result.
+    // Restore the leading/trailing unit dim by applying vector.shape_cast
+    // to the result
     rewriter.replaceOpWithNewOp<ShapeCastOp>(op, resultVectorType,
                                              elementwiseOp->getResult(0));
 

diff --git a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
index 3a5041fca53fc..5fd3cbd54aa58 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
@@ -604,42 +604,6 @@ func.func @fold_unit_dims_entirely(%arg0 : vector<8xi32>,
 
 // -----
 
-func.func @fold_inner_unit_dim(%arg0 : vector<8x1x3xf128>,
-                              %arg1 : vector<1x8x3xf128>) -> vector<8x3xf128> {
-   %sc_arg1 = vector.shape_cast %arg1 : vector<1x8x3xf128> to vector<8x1x3xf128>
-   %mul = arith.mulf %arg0, %sc_arg1 : vector<8x1x3xf128>
-   %res = vector.shape_cast %mul : vector<8x1x3xf128> to vector<8x3xf128>
-   return %res : vector<8x3xf128>
-}
-
-// CHECK-LABEL: func.func @fold_inner_unit_dim(
-// CHECK-SAME:    %[[VAL_0:.*]]: vector<8x1x3xf128>,
-// CHECK-SAME:    %[[VAL_1:.*]]: vector<1x8x3xf128>) -> vector<8x3xf128> {
-// CHECK:         %[[VAL_2:.*]] = vector.shape_cast %[[VAL_0]] : vector<8x1x3xf128> to vector<8x3xf128>
-// CHECK:         %[[VAL_3:.*]] = vector.shape_cast %[[VAL_1]] : vector<1x8x3xf128> to vector<8x3xf128>
-// CHECK:         %[[VAL_4:.*]] = arith.mulf %[[VAL_2]], %[[VAL_3]] : vector<8x3xf128>
-// CHECK:         return %[[VAL_4]] : vector<8x3xf128>
-
-// -----
-
-func.func @fold_inner_unit_dim_scalable(%arg0 : vector<8x1x[1]x3xf128>,
-                              %arg1 : vector<1x8x[1]x3xf128>) -> vector<8x[1]x3xf128> {
-   %sc_arg1 = vector.shape_cast %arg1 : vector<1x8x[1]x3xf128> to vector<8x1x[1]x3xf128>
-   %mul = arith.mulf %arg0, %sc_arg1 : vector<8x1x[1]x3xf128>
-   %res = vector.shape_cast %mul : vector<8x1x[1]x3xf128> to vector<8x[1]x3xf128>
-   return %res : vector<8x[1]x3xf128>
-}
-
-// CHECK-LABEL: func.func @fold_inner_unit_dim_scalable(
-// CHECK-SAME:    %[[VAL_0:.*]]: vector<8x1x[1]x3xf128>,
-// CHECK-SAME:    %[[VAL_1:.*]]: vector<1x8x[1]x3xf128>) -> vector<8x[1]x3xf128> {
-// CHECK:         %[[VAL_2:.*]] = vector.shape_cast %[[VAL_0]] : vector<8x1x[1]x3xf128> to vector<8x[1]x3xf128>
-// CHECK:         %[[VAL_3:.*]] = vector.shape_cast %[[VAL_1]] : vector<1x8x[1]x3xf128> to vector<8x[1]x3xf128>
-// CHECK:         %[[VAL_4:.*]] = arith.mulf %[[VAL_2]], %[[VAL_3]] : vector<8x[1]x3xf128>
-// CHECK:         return %[[VAL_4]] : vector<8x[1]x3xf128>
-
-// -----
-
 func.func @negative_out_of_bound_transfer_read(
     %arg : memref<?x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>>) -> vector<5x4x3x2xi8> {
   %c0 = arith.constant 0 : index


        

