[Mlir-commits] [mlir] [mlir][tosa] Change 'shape' of RESHAPE from attribute to input shape … (PR #125765)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Tue Feb 4 13:33:46 PST 2025


github-actions[bot] wrote:

<!--LLVM CODE FORMAT COMMENT: {clang-format}-->


:warning: The C/C++ code formatter clang-format found issues in your code. :warning:

<details>
<summary>
You can test this locally with the following command:
</summary>

``````````bash
git-clang-format --diff d13940ee263ff50b7a71e21424913cc0266bf9d4 08c31b561ea812be2bb34eb64341c7b62a7d6a44 --extensions cpp,h -- mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp mlir/lib/Dialect/Tosa/IR/TosaOps.cpp mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
``````````

</details>

<details>
<summary>
View the diff from clang-format here.
</summary>

``````````diff
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index d089fa3b61..edb04010d5 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -1952,11 +1952,10 @@ struct TileConverter : public OpConversionPattern<tosa::TileOp> {
           nestedBuilder.create<linalg::YieldOp>(op.getLoc(), *args.begin());
         });
 
-    auto shapeValue =
-        getTosaConstShape(rewriter, loc, mlir::tosa::convertFromMlirShape(resultTy.getShape()));
+    auto shapeValue = getTosaConstShape(
+        rewriter, loc, mlir::tosa::convertFromMlirShape(resultTy.getShape()));
     rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
-        op, resultTy, genericOp.getResult(0),
-        shapeValue);
+        op, resultTy, genericOp.getResult(0), shapeValue);
     return success();
   }
 };
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 541ae48a96..fdb8b1e147 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -239,7 +239,7 @@ public:
 
     llvm::SmallVector<int64_t> newShape;
     if (!tosa::getConstShapeValue(reshape.getShape().getDefiningOp(),
-                               newShape)) {
+                                  newShape)) {
       return failure();
     }
 
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 1e6d4ffdcf..f88c6df8e2 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -1311,7 +1311,7 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
   Type inputType = getElementTypeOrSelf(adaptor.getInput1().getType());
   llvm::SmallVector<int64_t> newShapeValue;
   if (!tosa::getConstShapeValue(adaptor.getShape().getDefiningOp(),
-                               newShapeValue)) {
+                                newShapeValue)) {
     auto rank = cast<tosa::shapeType>(adaptor.getShape().getType()).getRank();
     SmallVector<int64_t> fallback(rank, ShapedType::kDynamic);
     inferredReturnShapes.push_back(ShapedTypeComponents(fallback, inputType));
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
index 56df2862f4..04e8ad31cf 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
@@ -92,13 +92,13 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
     llvm::SmallVector<int64_t, 2> revisedInputShape{combined, inputShape[3]};
     auto revisedInputShapeType =
         RankedTensorType::get(revisedInputShape, inputType.getElementType());
-    auto revisedInputShapeValue =
-        getTosaConstShape(rewriter, op.getLoc(), convertFromMlirShape(revisedInputShape));
-    auto reshapedInput = rewriter
-                             .create<tosa::ReshapeOp>(
-                                 op.getLoc(), revisedInputShapeType, input,
-                                 revisedInputShapeValue)
-                             .getResult();
+    auto revisedInputShapeValue = getTosaConstShape(
+        rewriter, op.getLoc(), convertFromMlirShape(revisedInputShape));
+    auto reshapedInput =
+        rewriter
+            .create<tosa::ReshapeOp>(op.getLoc(), revisedInputShapeType, input,
+                                     revisedInputShapeValue)
+            .getResult();
 
     // Reshape kernel to [OC,KH,KW,IC] -> [OC, IC].
     llvm::SmallVector<int64_t, 2> revisedWeightShape{weightShape[0],
@@ -106,13 +106,13 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
     auto revisedWeightShapeType = RankedTensorType::get(
         revisedWeightShape,
         dyn_cast<RankedTensorType>(weight.getType()).getElementType());
-    auto revisedWeightShapeValue =
-        getTosaConstShape(rewriter, op.getLoc(), convertFromMlirShape(revisedWeightShape));
-    auto reshapedWeight = rewriter
-                              .create<tosa::ReshapeOp>(
-                                  op.getLoc(), revisedWeightShapeType, weight,
-                                  revisedWeightShapeValue)
-                              .getResult();
+    auto revisedWeightShapeValue = getTosaConstShape(
+        rewriter, op.getLoc(), convertFromMlirShape(revisedWeightShape));
+    auto reshapedWeight =
+        rewriter
+            .create<tosa::ReshapeOp>(op.getLoc(), revisedWeightShapeType,
+                                     weight, revisedWeightShapeValue)
+            .getResult();
 
     // Perform a fully connected network over the reshaped input and weight.
     llvm::SmallVector<int64_t, 2> fullyConnectedShape{combined, weightShape[0]};
@@ -145,11 +145,10 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
     // Reshape output to [N, IH, IW, OC].
     llvm::SmallVector<int64_t, 4> outputShape{inputShape[0], inputShape[1],
                                               inputShape[2], weightShape[0]};
-    auto outputShapeValue =
-        getTosaConstShape(rewriter, op.getLoc(), convertFromMlirShape(outputShape));
+    auto outputShapeValue = getTosaConstShape(
+        rewriter, op.getLoc(), convertFromMlirShape(outputShape));
     rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
-        op, resultType, fullyConnectedValue,
-        outputShapeValue);
+        op, resultType, fullyConnectedValue, outputShapeValue);
     return success();
   }
 };
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
index bfad8f7331..b26397d0e3 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
@@ -58,9 +58,8 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
     auto revisedInputShapeValue =
         getTosaConstShape(rewriter, op.getLoc(), revisedInputShape);
     input = rewriter
-                .create<tosa::ReshapeOp>(
-                    op.getLoc(), inputType, input,
-                    revisedInputShapeValue)
+                .create<tosa::ReshapeOp>(op.getLoc(), inputType, input,
+                                         revisedInputShapeValue)
                 .getResult();
 
     Type inputETy = inputType.getElementType();
@@ -155,10 +154,10 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
     auto outputShapeType = RankedTensorType::get(
         outputShape,
         dyn_cast<RankedTensorType>(input.getType()).getElementType());
-    auto outputShapeValue = getTosaConstShape(rewriter, op->getLoc(), outputShape);
+    auto outputShapeValue =
+        getTosaConstShape(rewriter, op->getLoc(), outputShape);
     Value outputValue = rewriter.create<tosa::ReshapeOp>(
-        op.getLoc(), outputShapeType, mulValue,
-        outputShapeValue);
+        op.getLoc(), outputShapeType, mulValue, outputShapeValue);
 
     Value bias = op.getBias();
     if (EqualizeRanks(rewriter, op.getLoc(), outputValue, bias).failed()) {
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
index 21355e4339..69a66c9830 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -262,7 +262,8 @@ public:
     llvm::SmallVector<int64_t, 6> convReshapeDims0 = {
         batch, convHeight, convWidth, stride[0], stride[1], outputChannels};
 
-    auto convReshapeDims0Value = getTosaConstShape(rewriter, loc, convReshapeDims0);
+    auto convReshapeDims0Value =
+        getTosaConstShape(rewriter, loc, convReshapeDims0);
 
     conv2d = CreateOpAndInferShape<tosa::ReshapeOp>(
         rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
@@ -281,7 +282,8 @@ public:
     llvm::SmallVector<int64_t, 6> convReshapeDims1 = {
         batch, convHeight * stride[0], convWidth * stride[1], outputChannels};
 
-    auto convReshapeDims1Value = getTosaConstShape(rewriter, loc, convReshapeDims1);
+    auto convReshapeDims1Value =
+        getTosaConstShape(rewriter, loc, convReshapeDims1);
 
     conv2d = CreateOpAndInferShape<tosa::ReshapeOp>(
         rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
index 0e1f93da23..281f0529a5 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
@@ -404,7 +404,7 @@ std::optional<Value> TosaReduceTransposes::buildMappedToValue(
   // Do not insert a TransposeOp, instead we fold the reshape and its attribute.
   llvm::SmallVector<int64_t> newShape;
   if (!tosa::getConstShapeValue(reshapeOp.getShape().getDefiningOp(),
-                               newShape)) {
+                                newShape)) {
     // this mean shape is not constant
     return std::nullopt;
   }

``````````

</details>


https://github.com/llvm/llvm-project/pull/125765


More information about the Mlir-commits mailing list