[Mlir-commits] [mlir] Make index computation use divsi/remsi (PR #124390)
llvmlistbot at llvm.org
Fri Jan 24 19:47:52 PST 2025
https://github.com/MaheshRavishankar created https://github.com/llvm/llvm-project/pull/124390
The index computation is meant to be signed; using unsigned operations can lead to subtle errors. Fix the places where index math was using unsigned operations.
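To illustrate the subtlety (a minimal sketch, not part of the patch): the index type is signless, so arith.divui reinterprets a negative dividend's bit pattern as a very large unsigned value, while arith.divsi returns the arithmetically expected quotient.

    // Hypothetical example: with %a = -4 and %b = 2 on a 64-bit target,
    //   arith.divsi yields -2, but
    //   arith.divui yields 9223372036854775806
    //   (the bit pattern of -4 read as unsigned, divided by 2).
    func.func @signed_vs_unsigned(%a: index, %b: index) -> (index, index) {
      %s = arith.divsi %a, %b : index
      %u = arith.divui %a, %b : index
      return %s, %u : index, index
    }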
From 2d22abf20c7e1b3c437a51baf6e7bb9b8da025c5 Mon Sep 17 00:00:00 2001
From: MaheshRavishankar <mahesh.ravishankar at gmail.com>
Date: Mon, 20 Jan 2025 22:56:47 -0800
Subject: [PATCH] Make divs signed.
Signed-off-by: MaheshRavishankar <mahesh.ravishankar at gmail.com>
squash
Signed-off-by: MaheshRavishankar <mahesh.ravishankar at gmail.com>
---
mlir/lib/Dialect/Arith/Utils/Utils.cpp | 2 +-
.../Linalg/Transforms/ElementwiseOpFusion.cpp | 5 +-
.../TosaToTensor/tosa-to-tensor.mlir | 24 +++----
.../Linalg/data-layout-propagation.mlir | 6 +-
.../fuse-with-reshape-by-collapsing.mlir | 38 +++++------
.../Dialect/Linalg/fusion-push-reshape.mlir | 2 +-
mlir/test/Dialect/Linalg/reshape_fusion.mlir | 66 +++++++++----------
mlir/test/Dialect/Tensor/bufferize.mlir | 4 +-
8 files changed, 74 insertions(+), 73 deletions(-)
diff --git a/mlir/lib/Dialect/Arith/Utils/Utils.cpp b/mlir/lib/Dialect/Arith/Utils/Utils.cpp
index 39c9005e449e38..8dde9866b22b38 100644
--- a/mlir/lib/Dialect/Arith/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Arith/Utils/Utils.cpp
@@ -69,7 +69,7 @@ mlir::inferExpandShapeOutputShape(OpBuilder &b, Location loc,
Value indexGroupSize = cast<Value>(inputShape[inputIndex]);
Value indexGroupStaticSizesProduct =
b.create<arith::ConstantIndexOp>(loc, indexGroupStaticSizesProductInt);
- Value dynamicDimSize = b.createOrFold<arith::DivUIOp>(
+ Value dynamicDimSize = b.createOrFold<arith::DivSIOp>(
loc, indexGroupSize, indexGroupStaticSizesProduct);
outputShapeValues.push_back(dynamicDimSize);
}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 3a57f368d4425d..60cae776442915 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -13,6 +13,7 @@
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
+#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
@@ -1572,9 +1573,9 @@ void generateCollapsedIndexingRegion(Location loc, Block *block,
rewriter.create<linalg::IndexOp>(loc, foldedDims.index());
for (auto dim : llvm::reverse(foldedDimsRef.drop_front())) {
indexReplacementVals[dim] =
- rewriter.create<arith::RemUIOp>(loc, newIndexVal, loopRange[dim]);
+ rewriter.create<arith::RemSIOp>(loc, newIndexVal, loopRange[dim]);
newIndexVal =
- rewriter.create<arith::DivUIOp>(loc, newIndexVal, loopRange[dim]);
+ rewriter.create<arith::DivSIOp>(loc, newIndexVal, loopRange[dim]);
}
indexReplacementVals[foldedDims.value().front()] = newIndexVal;
}
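(Aside, not part of the patch: the loop above recovers the original loop indices from a collapsed linalg.index by peeling off loop ranges innermost-first with a rem/div chain, now in signed arithmetic. A minimal hand-written sketch of the resulting IR for three folded dimensions, with hypothetical value names:

    %lin = linalg.index 0 : index
    // innermost dim first: remainder is the index, quotient carries on
    %k = arith.remsi %lin, %range_k : index
    %t = arith.divsi %lin, %range_k : index
    %j = arith.remsi %t, %range_j : index
    // the outermost folded dim receives the remaining quotient
    %i = arith.divsi %t, %range_j : index
)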
diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
index 2f11b31aad2307..27018fb79f60d8 100644
--- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
+++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
@@ -86,7 +86,7 @@ func.func @test_reshape_1d_down_s2s_explicit(%arg0: tensor<1xf32>) -> tensor<f32
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %arg0, %[[C0]] : tensor<?xf32>
// CHECK: %[[C2:.*]] = arith.constant 2 : index
-// CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C2]] : index
+// CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C2]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[ARG_0]] {{\[\[}}0, 1]] output_shape [2, %[[VAL_0]]] : tensor<?xf32> into tensor<2x?xf32>
// CHECK: return %[[EXPANDED]] : tensor<2x?xf32>
func.func @test_reshape_1d_up_d2d_auto(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {
@@ -135,7 +135,7 @@ func.func @test_reshape_2d_down_s2s_explicit(%arg0: tensor<2x3xf32>) -> tensor<6
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C2:.*]] = arith.constant 2 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C2]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C2]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] output_shape [2, %[[DIV]]] : tensor<?xf32> into tensor<2x?xf32>
// CHECK: return %[[EXPANDED]] : tensor<2x?xf32>
func.func @test_reshape_2d_same_d2d_auto(%arg0: tensor<?x2xf32>) -> tensor<2x?xf32> {
@@ -189,7 +189,7 @@ func.func @test_reshape_2d_same_s2s_explicit(%arg0: tensor<3x2xf32>) -> tensor<2
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C0_0:.*]] = arith.constant 0 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C0_0]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C0_0]] : index
// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [0, 3, %[[DIV]]] : tensor<?xf32> into tensor<0x3x?xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<0x3x?xf32> to tensor<?x?x?xf32>
// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
@@ -206,7 +206,7 @@ func.func @test_reshape_3d_same_d2d_auto_empty(%arg0: tensor<3x2x?xf32>) -> tens
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C8:.*]] = arith.constant 8 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C8]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C8]] : index
// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [2, %[[DIV]], 4] : tensor<?xf32> into tensor<2x?x4xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<2x?x4xf32> to tensor<?x?x?xf32>
// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
@@ -223,7 +223,7 @@ func.func @test_reshape_3d_same_d2d_auto(%arg0: tensor<2x?x?xf32>) -> tensor<?x?
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C6:.*]] = arith.constant 6 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [2, 3, %[[DIV]]] : tensor<?xf32> into tensor<2x3x?xf32>
// CHECK: return %[[VAL_1]] : tensor<2x3x?xf32>
func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor<?x3x4xf32>) -> tensor<2x3x?xf32> {
@@ -239,7 +239,7 @@ func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor<?x3x4xf32>) -> t
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C6:.*]] = arith.constant 6 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [%[[DIV]], 3, 2] : tensor<?xf32> into tensor<?x3x2xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x2xf32> to tensor<?x?x?xf32>
// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
@@ -256,7 +256,7 @@ func.func @test_reshape_3d_same_d2d_explicit_empty(%arg0: tensor<3x2x?xf32>) ->
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C12:.*]] = arith.constant 12 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C12]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C12]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [%[[DIV]], 3, 4] : tensor<?xf32> into tensor<?x3x4xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x4xf32> to tensor<?x?x?xf32>
// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
@@ -284,7 +284,7 @@ func.func @test_reshape_3d_same_d2d_explicit_identity(%arg0: tensor<?x3x4xf32>)
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C8:.*]] = arith.constant 8 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C8]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C8]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [2, %[[DIV]], 4] : tensor<?xf32> into tensor<2x?x4xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<2x?x4xf32> to tensor<2x3x4xf32>
// CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>
@@ -301,7 +301,7 @@ func.func @test_reshape_3d_same_d2s_auto(%arg0: tensor<?x?x?xf32>) -> tensor<2x3
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C12:.*]] = arith.constant 12 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C12]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C12]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [%[[DIV]], 3, 4] : tensor<?xf32> into tensor<?x3x4xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x4xf32> to tensor<2x3x4xf32>
// CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>
@@ -328,7 +328,7 @@ func.func @test_reshape_3d_same_s2s_explicit_identity(%arg0: tensor<2x3x4xf32>)
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[COLLAPSED]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C6:.*]] = arith.constant 6 : index
-// CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+// CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2, 3]] output_shape [%[[VAL_0]], 3, 2, 1] : tensor<?xf32> into tensor<?x3x2x1xf32>
// CHECK: %[[CAST:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x2x1xf32> to tensor<1x3x2x1xf32>
// CHECK: return %[[CAST]] : tensor<1x3x2x1xf32>
@@ -357,7 +357,7 @@ func.func @test_reshape_4d_down_d2s_explicit(%arg0: tensor<?x?x?x?xf32>) -> tens
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[COLLAPSED]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C6:.*]] = arith.constant 6 : index
-// CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+// CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 2, 3] : tensor<?xf32> into tensor<?x2x3xf32>
// CHECK: return %[[EXPANDED]] : tensor<?x2x3xf32>
func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor<?x?x?x2x3xf32>) -> tensor<?x2x3xf32> {
@@ -373,7 +373,7 @@ func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor<?x?x?x2x3xf32>) -> tensor
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[COLLAPSED]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C385:.*]] = arith.constant 385 : index
-// CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C385]] : index
+// CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C385]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 5, 77] : tensor<?xf32> into tensor<?x5x77xf32>
// CHECK: return %[[EXPANDED]] : tensor<?x5x77xf32>
func.func @test_reshape_6d_down_d2d_auto(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32> {
diff --git a/mlir/test/Dialect/Linalg/data-layout-propagation.mlir b/mlir/test/Dialect/Linalg/data-layout-propagation.mlir
index 07708231a6e2f6..cb8064411bbae0 100644
--- a/mlir/test/Dialect/Linalg/data-layout-propagation.mlir
+++ b/mlir/test/Dialect/Linalg/data-layout-propagation.mlir
@@ -1301,7 +1301,7 @@ func.func @push_down_unpack_through_expand(%5: tensor<?x32x8x8xf32>, %dim: index
// CHECK: %[[C32:.+]] = arith.constant 32 : index
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x32x8x8xf32>
-// CHECK: %[[SZ0:.+]] = arith.divui %[[DIM0]], %[[C32]] : index
+// CHECK: %[[SZ0:.+]] = arith.divsi %[[DIM0]], %[[C32]] : index
// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3], [4]] output_shape [%[[SZ0]], 32, 32, 8, 8] : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
// CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x32x32x8x8xf32>
// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>
@@ -1322,7 +1322,7 @@ func.func @push_down_unpack_through_expand_empty_outer_dims_perm(%5: tensor<?x32
// CHECK: %[[C32:.+]] = arith.constant 32 : index
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x32x8x8xf32>
-// CHECK: %[[SZ0:.+]] = arith.divui %[[DIM0]], %[[C32]] : index
+// CHECK: %[[SZ0:.+]] = arith.divsi %[[DIM0]], %[[C32]] : index
// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3], [4]] output_shape [%[[SZ0]], 32, 32, 8, 8] : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
// CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x32x32x8x8xf32>
// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>
@@ -1373,7 +1373,7 @@ func.func @push_down_unpack_through_expand_on_outer_dims(%5: tensor<?x32x8xf32>,
// CHECK: %[[C256:.+]] = arith.constant 256 : index
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x32x8xf32>
-// CHECK: %[[SZ0:.+]] = arith.divui %[[DIM0]], %[[C256]] : index
+// CHECK: %[[SZ0:.+]] = arith.divsi %[[DIM0]], %[[C256]] : index
// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3]] output_shape [%[[SZ0]], 256, 32, 8] : tensor<?x32x8xf32> into tensor<?x256x32x8xf32>
// CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x256x32x8xf32>
// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>
diff --git a/mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir b/mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir
index f17881d59a266e..7db997cd4c0b5f 100644
--- a/mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir
+++ b/mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir
@@ -99,14 +99,14 @@ func.func @fuse_by_collapsing_indexing_op(%arg0 : tensor<2x12x5x336x9xi32>,
// CHECK-DAG: %[[C7:.+]] = arith.constant 7 : index
// CHECK: %[[IV0:.+]] = linalg.index 0
// CHECK: %[[IV1:.+]] = linalg.index 1
-// CHECK: %[[REM_IV1:.+]] = arith.remui %[[IV1]], %[[C4]]
-// CHECK: %[[DIV_IV1:.+]] = arith.divui %[[IV1]], %[[C4]]
+// CHECK: %[[REM_IV1:.+]] = arith.remsi %[[IV1]], %[[C4]]
+// CHECK: %[[DIV_IV1:.+]] = arith.divsi %[[IV1]], %[[C4]]
// CHECK: %[[IV2:.+]] = linalg.index 2
// CHECK: %[[IV3:.+]] = linalg.index 3
-// CHECK: %[[REM1_IV3:.+]] = arith.remui %[[IV3]], %[[C8]]
-// CHECK: %[[DIV1_IV3:.+]] = arith.divui %[[IV3]], %[[C8]]
-// CHECK: %[[REM2_IV3:.+]] = arith.remui %[[DIV1_IV3]], %[[C7]]
-// CHECK: %[[DIV2_IV3:.+]] = arith.divui %[[DIV1_IV3]], %[[C7]]
+// CHECK: %[[REM1_IV3:.+]] = arith.remsi %[[IV3]], %[[C8]]
+// CHECK: %[[DIV1_IV3:.+]] = arith.divsi %[[IV3]], %[[C8]]
+// CHECK: %[[REM2_IV3:.+]] = arith.remsi %[[DIV1_IV3]], %[[C7]]
+// CHECK: %[[DIV2_IV3:.+]] = arith.divsi %[[DIV1_IV3]], %[[C7]]
// CHECK: %[[IV4:.+]] = linalg.index 4
// CHECK: %[[T0:.+]] = arith.addi %[[IV0]], %[[DIV_IV1]]
// CHECK: %[[T1:.+]] = arith.addi %[[T0]], %[[REM_IV1]]
@@ -215,13 +215,13 @@ func.func @fuse_by_collapsing_dynamic(%arg0 : tensor<?x?x?x?x?xi32>,
// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[EXPAND]], %[[C5]]
// CHECK: linalg.generic
// CHECK: %[[IV0:.+]] = linalg.index 1
-// CHECK: %[[REM1_IV0:.+]] = arith.remui %[[IV0]], %[[C5]]
-// CHECK: %[[DIV1_IV0:.+]] = arith.divui %[[IV0]], %[[C5]]
-// CHECK: %[[REM2_IV0:.+]] = arith.remui %[[DIV1_IV0]], %[[D1]]
-// CHECK: %[[DIV2_IV0:.+]] = arith.divui %[[DIV1_IV0]], %[[D1]]
+// CHECK: %[[REM1_IV0:.+]] = arith.remsi %[[IV0]], %[[C5]]
+// CHECK: %[[DIV1_IV0:.+]] = arith.divsi %[[IV0]], %[[C5]]
+// CHECK: %[[REM2_IV0:.+]] = arith.remsi %[[DIV1_IV0]], %[[D1]]
+// CHECK: %[[DIV2_IV0:.+]] = arith.divsi %[[DIV1_IV0]], %[[D1]]
// CHECK: %[[IV1:.+]] = linalg.index 3
-// CHECK: %[[REM1_IV1:.+]] = arith.remui %[[IV1]], %[[D0]]
-// CHECK: %[[DIV1_IV1:.+]] = arith.divui %[[IV1]], %[[D0]]
+// CHECK: %[[REM1_IV1:.+]] = arith.remsi %[[IV1]], %[[D0]]
+// CHECK: %[[DIV1_IV1:.+]] = arith.divsi %[[IV1]], %[[D0]]
// -----
@@ -439,7 +439,7 @@ func.func @fuse_only_one_reassociation(%arg0 : tensor<?x?xf32>, %arg1 : tensor<4
// CHECK-SAME: outs(%[[COLLAPSE_ARG1_1]] :
// CHECK: %[[DIM:.+]] = tensor.dim %[[GENERIC]], %[[C1]] : tensor<4x?x?xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[GENERIC]], %[[C2]] : tensor<4x?x?xf32>
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_2]], %[[C8]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_2]], %[[C8]] : index
// CHECK: %[[EXPANDED_3:.+]] = tensor.expand_shape %[[GENERIC]] {{\[\[}}0], [1], [2, 3]] output_shape [4, %[[DIM]], %[[VAL_1]], 8] : tensor<4x?x?xf32> into tensor<4x?x?x8xf32>
// CHECK: return %[[EXPANDED_3]]
@@ -492,11 +492,11 @@ func.func @fold_non_consecutive_dims(%arg0 : tensor<?x?xi32>, %sz0: index, %sz1:
// CHECK-SAME: outs(%[[COLLAPSE_INIT]] :
// CHECK-NEXT: ^bb{{[0-9]}}
// CHECK: %[[ID0:.+]] = linalg.index 0
-// CHECK-DAG: %[[T0:.+]] = arith.remui %[[ID0]], %[[C4]]
-// CHECK-DAG: %[[T1:.+]] = arith.divui %[[ID0]], %[[C4]]
+// CHECK-DAG: %[[T0:.+]] = arith.remsi %[[ID0]], %[[C4]]
+// CHECK-DAG: %[[T1:.+]] = arith.divsi %[[ID0]], %[[C4]]
// CHECK: %[[ID1:.+]] = linalg.index 1
-// CHECK-DAG: %[[T2:.+]] = arith.remui %[[ID1]], %[[C8]]
-// CHECK-DAG: %[[T3:.+]] = arith.divui %[[ID1]], %[[C8]]
+// CHECK-DAG: %[[T2:.+]] = arith.remsi %[[ID1]], %[[C8]]
+// CHECK-DAG: %[[T3:.+]] = arith.divsi %[[ID1]], %[[C8]]
// CHECK-DAG: %[[T4:.+]] = arith.addi %[[T1]], %[[T2]]
// CHECK-DAG: %[[T5:.+]] = arith.addi %[[T4]], %[[T0]]
// CHECK-DAG: %[[T6:.+]] = arith.addi %[[T5]], %[[T3]]
@@ -504,8 +504,8 @@ func.func @fold_non_consecutive_dims(%arg0 : tensor<?x?xi32>, %sz0: index, %sz1:
// CHECK: linalg.yield %[[T7]]
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[GENERIC]], %[[C0]] : tensor<?x?xi32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[GENERIC]], %[[C1]] : tensor<?x?xi32>
-// CHECK: %[[VAL_2:.+]] = arith.divui %[[DIM_1]], %[[C8]] : index
-// CHECK: %[[VAL_3:.+]] = arith.divui %[[DIM_2]], %[[C4]] : index
+// CHECK: %[[VAL_2:.+]] = arith.divsi %[[DIM_1]], %[[C8]] : index
+// CHECK: %[[VAL_3:.+]] = arith.divsi %[[DIM_2]], %[[C4]] : index
// CHECK: %[[EXPANDED_3:.+]] = tensor.expand_shape %[[GENERIC]] {{\[\[}}0, 1], [2, 3]] output_shape [%[[VAL_2]], 8, %[[VAL_3]], 4] : tensor<?x?xi32> into tensor<?x8x?x4xi32>
// CHECK: return %[[EXPANDED_3]]
diff --git a/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir b/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
index 751ece37bc094f..7acbd843cd1e7c 100644
--- a/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
@@ -12,7 +12,7 @@
// CHECK-SAME: iterator_types = ["parallel", "parallel"]}
// CHECK-SAME: ins(%[[A]], %[[B]] : tensor<?x16xf32>, tensor<16xf32>) outs(%[[RI]] : tensor<?x16xf32>)
// CHECK: %[[DIM:.*]] = tensor.dim %[[R]], %[[C0]] : tensor<?x16xf32>
-// CHECK: %[[VAL_1:.*]] = arith.divui %[[DIM]], %[[C112]] : index
+// CHECK: %[[VAL_1:.*]] = arith.divsi %[[DIM]], %[[C112]] : index
// CHECK: %[[RR:.*]] = tensor.expand_shape %[[R]] {{\[\[}}0, 1], [2]] output_shape [%[[VAL_1]], 112, 16] : tensor<?x16xf32> into tensor<?x112x16xf32>
// CHECK: return %[[RR]] : tensor<?x112x16xf32>
func.func @reshape(%A: tensor<?x16xf32>, %B: tensor<16xf32>, %init: tensor<?x112x16xf32>, %sz0: index) -> tensor<?x112x16xf32> {
diff --git a/mlir/test/Dialect/Linalg/reshape_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
index b8df5fc88e1999..ef853e4d662a77 100644
--- a/mlir/test/Dialect/Linalg/reshape_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
@@ -37,12 +37,12 @@ func.func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xf32>,
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG1]], %[[C1]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[ARG1]], %[[C2]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_0:.+]] = arith.divui %[[DIM_1]], %[[C4]] : index
+// CHECK: %[[VAL_0:.+]] = arith.divsi %[[DIM_1]], %[[C4]] : index
// CHECK: %[[T1:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0], [1], [2, 3]] output_shape [%[[DIM]], %[[DIM_0]], %[[VAL_0]], 4] : tensor<?x?x?xf32> into tensor<?x?x?x4xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_3:.+]] = tensor.dim %[[ARG1]], %[[C1]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_4:.+]] = tensor.dim %[[ARG1]], %[[C2]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_4]], %[[C4]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_4]], %[[C4]] : index
// CHECK: %[[T2:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0], [1], [2, 3]] output_shape [%[[DIM_2]], %[[DIM_3]], %[[VAL_1]], 4] : tensor<?x?x?xf32> into tensor<?x?x?x4xf32>
// CHECK: %[[T3:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP5]], #[[MAP6]], #[[MAP7]], #[[MAP6]]]
@@ -93,15 +93,15 @@ func.func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?xf32>,
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_0:.+]] = arith.divui %[[DIM_0]], %[[C20]] : index
+// CHECK: %[[VAL_0:.+]] = arith.divsi %[[DIM_0]], %[[C20]] : index
// CHECK: %[[T0:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0], [1, 2, 3]] output_shape [%[[DIM]], 4, %[[VAL_0]], 5] : tensor<?x?xf32> into tensor<?x4x?x5xf32>
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[ARG1]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_2]], %[[C20]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_2]], %[[C20]] : index
// CHECK: %[[T1:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0], [1, 2, 3]] output_shape [%[[DIM_1]], 4, %[[VAL_1]], 5] : tensor<?x?xf32> into tensor<?x4x?x5xf32>
// CHECK: %[[DIM_4:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_5:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_2:.+]] = arith.divui %[[DIM_5]], %[[C20]] : index
+// CHECK: %[[VAL_2:.+]] = arith.divsi %[[DIM_5]], %[[C20]] : index
// CHECK: %[[T2:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0], [1, 2, 3]] output_shape [%[[DIM_4]], 4, %[[VAL_2]], 5] : tensor<?x?xf32> into tensor<?x4x?x5xf32>
// CHECK: %[[T3:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP2]], #[[MAP2]], #[[MAP3]], #[[MAP2]]]
@@ -144,18 +144,18 @@ func.func @reshape_as_consumer_permutation
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_0:.+]] = arith.divui %[[DIM]], %[[C12]] : index
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_0]], %[[C2]] : index
+// CHECK: %[[VAL_0:.+]] = arith.divsi %[[DIM]], %[[C12]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_0]], %[[C2]] : index
// CHECK: %[[T0:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0, 1, 2], [3, 4], [5]] output_shape [3, 4, %[[VAL_0]], %[[VAL_1]], 2, %[[DIM_1]]] : tensor<?x?x?xf32> into tensor<3x4x?x?x2x?xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_3:.+]] = tensor.dim %[[ARG1]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_2:.+]] = arith.divui %[[DIM_2]], %[[C12]] : index
+// CHECK: %[[VAL_2:.+]] = arith.divsi %[[DIM_2]], %[[C12]] : index
// CHECK: %[[T1:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0, 1, 2], [3]] output_shape [3, 4, %[[VAL_2]], %[[DIM_3]]] : tensor<?x?xf32> into tensor<3x4x?x?xf32>
// CHECK: %[[DIM_5:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_6:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_7:.+]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_3:.+]] = arith.divui %[[DIM_5]], %[[C2]] : index
-// CHECK: %[[VAL_4:.+]] = arith.divui %[[DIM_7]], %[[C12]] : index
+// CHECK: %[[VAL_3:.+]] = arith.divsi %[[DIM_5]], %[[C2]] : index
+// CHECK: %[[VAL_4:.+]] = arith.divsi %[[DIM_7]], %[[C12]] : index
// CHECK: %[[T2:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0, 1], [2], [3, 4, 5]] output_shape [%[[VAL_3]], 2, %[[DIM_6]], 3, 4, %[[VAL_4]]] : tensor<?x?x?xf32> into tensor<?x2x?x3x4x?xf32>
// CHECK: %[[T3:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP8]], #[[MAP9]], #[[MAP10]]]
@@ -463,15 +463,15 @@ func.func @generic_op_reshape_consumer_fusion_projected(%arg0 : tensor<?x?xf32>,
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_0:.+]] = arith.divui %[[DIM]], %[[C20]] : index
+// CHECK: %[[VAL_0:.+]] = arith.divsi %[[DIM]], %[[C20]] : index
// CHECK: %[[T0:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0, 1, 2], [3]] output_shape [%[[VAL_0]], 4, 5, %[[DIM_0]]] : tensor<?x?xf32> into tensor<?x4x5x?xf32>
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[ARG1]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_1]], %[[C20]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_1]], %[[C20]] : index
// CHECK: %[[T1:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0, 1, 2], [3]] output_shape [%[[VAL_1]], 4, 5, %[[DIM_2]]] : tensor<?x?xf32> into tensor<?x4x5x?xf32>
// CHECK: %[[DIM_4:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_5:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_2:.+]] = arith.divui %[[DIM_5]], %[[C20]] : index
+// CHECK: %[[VAL_2:.+]] = arith.divsi %[[DIM_5]], %[[C20]] : index
// CHECK: %[[T2:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0], [1, 2, 3]] output_shape [%[[DIM_4]], %[[VAL_2]], 4, 5] : tensor<?x?xf32> into tensor<?x?x4x5xf32>
// CHECK: %[[T3:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP4]], #[[MAP4]], #[[MAP5]]]
@@ -569,24 +569,24 @@ func.func @reshape_as_consumer_permutation_with_multiple_results
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_0:.+]] = arith.divui %[[DIM]], %[[C12]] : index
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_0]], %[[C2]] : index
+// CHECK: %[[VAL_0:.+]] = arith.divsi %[[DIM]], %[[C12]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_0]], %[[C2]] : index
// CHECK: %[[RESHAPE0:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0, 1, 2], [3, 4], [5]] output_shape [3, 4, %[[VAL_0]], %[[VAL_1]], 2, %[[DIM_1]]] : tensor<?x?x?xf32> into tensor<3x4x?x?x2x?xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_3:.+]] = tensor.dim %[[ARG1]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_2:.+]] = arith.divui %[[DIM_2]], %[[C12]] : index
+// CHECK: %[[VAL_2:.+]] = arith.divsi %[[DIM_2]], %[[C12]] : index
// CHECK: %[[RESHAPE1:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0, 1, 2], [3]] output_shape [3, 4, %[[VAL_2]], %[[DIM_3]]] : tensor<?x?xf32> into tensor<3x4x?x?xf32>
// CHECK: %[[DIM_5:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_6:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_7:.+]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_3:.+]] = arith.divui %[[DIM_5]], %[[C2]] : index
-// CHECK: %[[VAL_4:.+]] = arith.divui %[[DIM_7]], %[[C12]] : index
+// CHECK: %[[VAL_3:.+]] = arith.divsi %[[DIM_5]], %[[C2]] : index
+// CHECK: %[[VAL_4:.+]] = arith.divsi %[[DIM_7]], %[[C12]] : index
// CHECK: %[[RESHAPE2:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0, 1], [2], [3, 4, 5]] output_shape [%[[VAL_3]], 2, %[[DIM_6]], 3, 4, %[[VAL_4]]] : tensor<?x?x?xf32> into tensor<?x2x?x3x4x?xf32>
// CHECK: %[[DIM_9:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_10:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?x?xf32>
// CHECK: %[[DIM_11:.+]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_5:.+]] = arith.divui %[[DIM_10]], %[[C2]] : index
-// CHECK: %[[VAL_6:.+]] = arith.divui %[[DIM_11]], %[[C12]] : index
+// CHECK: %[[VAL_5:.+]] = arith.divsi %[[DIM_10]], %[[C2]] : index
+// CHECK: %[[VAL_6:.+]] = arith.divsi %[[DIM_11]], %[[C12]] : index
// CHECK: %[[RESHAPE3:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0], [1, 2], [3, 4, 5]] output_shape [%[[DIM_9]], %[[VAL_5]], 2, 3, 4, %[[VAL_6]]] : tensor<?x?x?xf32> into tensor<?x?x2x3x4x?xf32>
// CHECK: %[[GENERIC:.+]]:2 = linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]], #[[MAP3]]]
@@ -667,11 +667,11 @@ func.func @generic_op_reshape_consumer_fusion_reduction(%arg0 : tensor<?x?xf32>,
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG1]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_0:.+]] = arith.divui %[[DIM]], %[[C20]] : index
+// CHECK: %[[VAL_0:.+]] = arith.divsi %[[DIM]], %[[C20]] : index
// CHECK: %[[T1:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0, 1, 2], [3]] output_shape [%[[VAL_0]], 4, 5, %[[DIM_0]]] : tensor<?x?xf32> into tensor<?x4x5x?xf32>
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[ARG2]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_2]], %[[C20]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_2]], %[[C20]] : index
// CHECK: %[[T2:.+]] = tensor.expand_shape %[[ARG2]] {{\[\[}}0], [1, 2, 3]] output_shape [%[[DIM_1]], %[[VAL_1]], 4, 5] : tensor<?x?xf32> into tensor<?x?x4x5xf32>
// CHECK: %[[T3:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
@@ -719,13 +719,13 @@ func.func @generic_op_reshape_producer_fusion_with_reduction(%arg0 : tensor<?x7x
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x4x?xf32>
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG1]], %[[C2]] : tensor<?x4x?xf32>
-// CHECK: %[[VAL_0:.+]] = arith.divui %[[DIM]], %[[C8]] : index
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_0]], %[[C7]] : index
+// CHECK: %[[VAL_0:.+]] = arith.divsi %[[DIM]], %[[C8]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_0]], %[[C7]] : index
// CHECK: %[[T1:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0, 1], [2], [3, 4]] output_shape [%[[VAL_0]], 8, 4, %[[VAL_1]], 7] : tensor<?x4x?xf32> into tensor<?x8x4x?x7xf32>
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[ARG2]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_2:.+]] = arith.divui %[[DIM_1]], %[[C8]] : index
-// CHECK: %[[VAL_3:.+]] = arith.divui %[[DIM_2]], %[[C7]] : index
+// CHECK: %[[VAL_2:.+]] = arith.divsi %[[DIM_1]], %[[C8]] : index
+// CHECK: %[[VAL_3:.+]] = arith.divsi %[[DIM_2]], %[[C7]] : index
// CHECK: %[[T2:.+]] = tensor.expand_shape %[[ARG2]] {{\[\[}}0, 1], [2, 3]] output_shape [%[[VAL_2]], 8, %[[VAL_3]], 7] : tensor<?x?xf32> into tensor<?x8x?x7xf32>
// CHECK: %[[T3:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]]
@@ -764,15 +764,15 @@ func.func @linalg_add_reshape_consumer_fusion(%arg0 : tensor<?x?xf32>,
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_0:.+]] = arith.divui %[[DIM_0]], %[[C20]] : index
+// CHECK: %[[VAL_0:.+]] = arith.divsi %[[DIM_0]], %[[C20]] : index
// CHECK: %[[T1:.+]] = tensor.expand_shape %[[ARG0]] {{\[\[}}0], [1, 2, 3]] output_shape [%[[DIM]], %[[VAL_0]], 4, 5] : tensor<?x?xf32> into tensor<?x?x4x5xf32>
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[ARG1]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_2]], %[[C20]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_2]], %[[C20]] : index
// CHECK: %[[T2:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0], [1, 2, 3]] output_shape [%[[DIM_1]], %[[VAL_1]], 4, 5] : tensor<?x?xf32> into tensor<?x?x4x5xf32>
// CHECK: %[[DIM_4:.+]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_5:.+]] = tensor.dim %[[ARG2]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_2:.+]] = arith.divui %[[DIM_5]], %[[C20]] : index
+// CHECK: %[[VAL_2:.+]] = arith.divsi %[[DIM_5]], %[[C20]] : index
// CHECK: %[[T3:.+]] = tensor.expand_shape %[[ARG2]] {{\[\[}}0], [1, 2, 3]] output_shape [%[[DIM_4]], %[[VAL_2]], 4, 5] : tensor<?x?xf32> into tensor<?x?x4x5xf32>
// CHECK: %[[T4:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP]], #[[MAP]], #[[MAP]]]
@@ -809,13 +809,13 @@ func.func @linalg_add_reshape_producer_fusion(%arg0 : tensor<?x7x?x8xf32>,
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG1]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_0:.+]] = arith.divui %[[DIM]], %[[C7]] : index
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_0]], %[[C8]] : index
+// CHECK: %[[VAL_0:.+]] = arith.divsi %[[DIM]], %[[C7]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_0]], %[[C8]] : index
// CHECK: %[[T1:.+]] = tensor.expand_shape %[[ARG1]] {{\[\[}}0, 1], [2, 3]] output_shape [%[[VAL_0]], 7, %[[VAL_1]], 8] : tensor<?x?xf32> into tensor<?x7x?x8xf32>
// CHECK: %[[DIM_1:.+]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[DIM_2:.+]] = tensor.dim %[[ARG2]], %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[VAL_2:.+]] = arith.divui %[[DIM_1]], %[[C7]] : index
-// CHECK: %[[VAL_3:.+]] = arith.divui %[[DIM_2]], %[[C8]] : index
+// CHECK: %[[VAL_2:.+]] = arith.divsi %[[DIM_1]], %[[C7]] : index
+// CHECK: %[[VAL_3:.+]] = arith.divsi %[[DIM_2]], %[[C8]] : index
// CHECK: %[[T2:.+]] = tensor.expand_shape %[[ARG2]] {{\[\[}}0, 1], [2, 3]] output_shape [%[[VAL_2]], 7, %[[VAL_3]], 8] : tensor<?x?xf32> into tensor<?x7x?x8xf32>
// CHECK: %[[T3:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[$MAP]], #[[$MAP]], #[[$MAP]]]
diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
index ecd285be461947..9ea0a15f31185a 100644
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -372,7 +372,7 @@ func.func @tensor.expand_shape(%t1: tensor<?x10xf32>, %sz0: index) -> tensor<2x?
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = memref.dim %[[m1]], %[[C0]] : memref<?x10xf32>
// CHECK: %[[C2:.*]] = arith.constant 2 : index
- // CHECK: %[[VAL_1:.*]] = arith.divui %[[DIM]], %[[C2]] : index
+ // CHECK: %[[VAL_1:.*]] = arith.divsi %[[DIM]], %[[C2]] : index
// CHECK: %[[expanded:.*]] = memref.expand_shape %[[m1]] {{\[\[}}0, 1], [2]] output_shape [2, %[[VAL_1]], 10] : memref<?x10xf32> into memref<2x?x10xf32>
%0 = tensor.expand_shape %t1 [[0, 1], [2]] output_shape [2, %sz0, 10]
: tensor<?x10xf32> into tensor<2x?x10xf32>
@@ -393,7 +393,7 @@ func.func @tensor.expand_shape_of_slice(
%0 = tensor.extract_slice %t1[%o1, 5][%s1, 10][1, 1] :
tensor<?x20xf32> to tensor<?x10xf32>
// CHECK: %[[C7:.*]] = arith.constant 7 : index
- // CHECK: %[[VAL_1:.*]] = arith.divui %{{.*}}, %[[C7]] : index
+ // CHECK: %[[VAL_1:.*]] = arith.divsi %{{.*}}, %[[C7]] : index
// CHECK: %[[expanded:.*]] = memref.expand_shape %[[subview]] {{\[\[}}0, 1], [2, 3]] output_shape [%[[VAL_1]], 7, 2, 5] : memref<?x10xf32, strided<[20, 1], offset: ?>> into memref<?x7x2x5xf32, strided<[140, 20, 5, 1], offset: ?>>
%1 = tensor.expand_shape %0 [[0, 1], [2, 3]] output_shape [%sz0, 7, 2, 5] :
tensor<?x10xf32> into tensor<?x7x2x5xf32>