[Mlir-commits] [mlir] [mlir][tensor] Fix FoldTensorCastProducerOp for multiple result operations (PR #93374)

Prashant Kumar llvmlistbot at llvm.org
Wed Jun 5 13:19:22 PDT 2024


https://github.com/pashu123 updated https://github.com/llvm/llvm-project/pull/93374

From ae2bae5bd04445e4d9210af8f465455358cc1858 Mon Sep 17 00:00:00 2001
From: Prashant Kumar <pk5561 at gmail.com>
Date: Sat, 25 May 2024 13:45:07 +0530
Subject: [PATCH] [mlir][tensor] Fix bug when an op has multiple results

For ops that have results in addition to their DPS inits, the fold
fails. For example:
```
%13:2 = iree_codegen.ukernel.generic "iree_uk_unpack"
    ins(%extracted_slice : tensor<?x1x16x16xf32>)
    outs(%11 : tensor<?x16xf32>) ..
```
The op above has a result that is not tied to a DPS init, so the fold
fails. The PR assumes that an op's results list the DPS-init-tied
results first, followed by the non-DPS results.
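
As a sketch of the intended behavior, reconstructed from the CHECK lines
of the new test case added below, the canonicalization should rewrite:
```
// Before: casts to dynamic shapes feed the DPS op.
%cast = tensor.cast %arg0 : tensor<2x2xf32> to tensor<?x2xf32>
%cast_0 = tensor.cast %arg1 : tensor<2x2xf32> to tensor<?x2xf32>
%0:2 = test.destination_style_op ins(%cast : tensor<?x2xf32>)
    outs(%cast_0 : tensor<?x2xf32>) -> tensor<?x2xf32>, index

// After: the casts are folded into the consumer. The result tied to the
// DPS init becomes static; the extra index result keeps its type instead
// of being dropped.
%1:2 = test.destination_style_op ins(%arg0 : tensor<2x2xf32>)
    outs(%arg1 : tensor<2x2xf32>) -> tensor<2x2xf32>, index
```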
---
 mlir/lib/Dialect/Tensor/IR/TensorOps.cpp   |  6 +++---
 mlir/test/Dialect/Tensor/canonicalize.mlir | 16 ++++++++++++++++
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 8545c7b9af8f7..986008b9d379d 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -4531,17 +4531,17 @@ struct FoldTensorCastProducerOp
     if (!hasTensorCastOperand)
       return failure();
 
-    SmallVector<Type, 4> newResultTypes;
-    newResultTypes.reserve(op->getNumResults());
+    SmallVector<Type, 4> newResultTypes(op->getResultTypes());
     SmallVector<Value, 4> newOperands;
     newOperands.reserve(op->getNumOperands());
+    int64_t dpsInitIdx = 0;
     for (OpOperand &opOperand : op->getOpOperands()) {
       auto tensorCastOp = opOperand.get().getDefiningOp<tensor::CastOp>();
       bool fold = canFoldIntoConsumerOp(tensorCastOp);
       newOperands.push_back(fold ? tensorCastOp.getOperand() : opOperand.get());
       if (op.isDpsInit(&opOperand) &&
           !llvm::isa<MemRefType>(newOperands.back().getType()))
-        newResultTypes.push_back(newOperands.back().getType());
+        newResultTypes[dpsInitIdx++] = newOperands.back().getType();
     }
 
     // Clone op.
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
index f7fbd3834288b..6cf5667a8c2a4 100644
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -2523,3 +2523,19 @@ func.func @dim_out_of_bounds() -> vector<7xi32> {
     %16 = affine.vector_load %alloc_21[%c1, %c1, %dim] : memref<?x26x2xi32>, vector<7xi32>
     return %16 : vector<7xi32>
 }
+
+// -----
+
+// This tests canonicalization in the case of multiple results.
+// CHECK-LABEL:   func.func @test_destination_multiple_result(
+// CHECK-SAME:         %[[ARG1:.*]]: tensor<2x2xf32>,
+// CHECK-SAME:         %[[ARG2:.*]]: tensor<2x2xf32>) -> index {
+// CHECK:           %[[RES:.*]]:2 = test.destination_style_op ins(%[[ARG1]] : tensor<2x2xf32>)
+// CHECK-SAME:      outs(%[[ARG2]] : tensor<2x2xf32>) -> tensor<2x2xf32>, index
+// CHECK:           return %[[RES]]#1 : index
+func.func @test_destination_multiple_result(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> index {
+  %cast = tensor.cast %arg0 : tensor<2x2xf32> to tensor<?x2xf32>
+  %cast_0 = tensor.cast %arg1 : tensor<2x2xf32> to tensor<?x2xf32>
+  %0:2 = test.destination_style_op ins(%cast : tensor<?x2xf32>) outs(%cast_0 : tensor<?x2xf32>) -> tensor<?x2xf32>, index
+  return %0#1 : index
+}
