[Mlir-commits] [mlir] [mlir][tensor] Add shape inference support for `tensor.concat` op. (PR #140168)

Prashant Kumar llvmlistbot at llvm.org
Sat May 17 23:25:01 PDT 2025


================
@@ -773,11 +775,111 @@ struct SingleInputConcatOp : public OpRewritePattern<ConcatOp> {
     return success();
   }
 };
+
+/// Propagate static shapes into the operands of a `tensor.concat`.
+///
+/// `tensor.concat` requires every operand to match on all dimensions except the
+/// concatenation dimension. If one operand is already static in those
+/// dimensions, the other operands may safely be refined to that same static
+/// shape.
+///
+/// Example:
+///
+/// ```mlir
+///   %2 = tensor.concat dim(0) %0, %1: (tensor<?x12xi32>, tensor<?x?xi32>) ->
+///        tensor<?x12xi32>
+/// ```
+/// ->
+/// ```mlir
+///   %cast = tensor.cast %1 : tensor<?x?xi32> to tensor<?x12xi32>
+///   %2 = tensor.concat dim(0) %0, %cast :
+///        (tensor<?x12xi32>, tensor<?x12xi32>) -> tensor<?x12xi32>
+/// ```
+struct InferConcatOperandTypes : public OpRewritePattern<ConcatOp> {
+  using OpRewritePattern<ConcatOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(ConcatOp concatOp,
+                                PatternRewriter &rewriter) const override {
+    auto operandTensorTypes =
+        llvm::map_range(concatOp->getOperandTypes(), [](Type type) {
+          return llvm::cast<RankedTensorType>(type);
+        });
----------------
pashu123 wrote:

No need to create this extra local variable. Just compute it on the fly inside the for loop.

https://github.com/llvm/llvm-project/pull/140168


More information about the Mlir-commits mailing list