[Mlir-commits] [mlir] [mlir][tosa] Always generate pad_const and remove input_zp attr for PadOp (PR #129336)
llvmlistbot at llvm.org
Fri Feb 28 15:18:03 PST 2025
https://github.com/Jerry-Ge updated https://github.com/llvm/llvm-project/pull/129336
From 91f7b7b695093878be9237d86ea6a4e8dcc39550 Mon Sep 17 00:00:00 2001
From: Jerry Ge <jerry.ge at arm.com>
Date: Thu, 13 Feb 2025 16:25:26 -0800
Subject: [PATCH] [mlir][tosa] Always generate pad_const and remove input_zp
attr for PadOp
Signed-off-by: Jerry Ge <jerry.ge at arm.com>
Change-Id: I2b7a0169b7ec1158d28779713ad125c061e04592
Co-authored-by: Udaya Ranga <udaya.ranga at arm.com>
Co-authored-by: Tai Ly <tai.ly at arm.com>
Change-Id: I4c9136c84afefc974ac3d4d151aa2049efe1ce3d
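With this change, tosa.pad always carries an explicit pad_const operand and no longer accepts an input_zp attribute. A minimal sketch of the updated op form, modeled on the test updates below (the tensor shapes and the 0.0 pad value are illustrative only):
  %padding = tosa.const_shape {value = dense<0> : tensor<6xindex>} : () -> !tosa.shape<6>
  %pad_const = "tosa.const"() {value = dense<0.0> : tensor<1xf32>} : () -> tensor<1xf32>
  %0 = tosa.pad %arg0, %padding, %pad_const : (tensor<13x21x3xf32>, !tosa.shape<6>, tensor<1xf32>) -> tensor<13x21x3xf32>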
---
.../mlir/Dialect/Tosa/IR/TosaOpBase.td | 8 ---
mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h | 4 ++
mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td | 7 +--
.../Conversion/TosaToTensor/TosaToTensor.cpp | 21 +-------
.../Dialect/Tosa/IR/TosaCanonicalizations.cpp | 47 ------------------
mlir/lib/Dialect/Tosa/IR/TosaOps.cpp | 45 ++++++++---------
.../Transforms/TosaDecomposeTransposeConv.cpp | 34 ++++++-------
.../TosaToTensor/tosa-to-tensor.mlir | 25 ++++++----
mlir/test/Dialect/Tosa/availability.mlir | 3 +-
mlir/test/Dialect/Tosa/canonicalize.mlir | 22 ++++++---
mlir/test/Dialect/Tosa/invalid.mlir | 17 ++++---
mlir/test/Dialect/Tosa/ops.mlir | 8 ---
.../Tosa/tosa-decompose-transpose-conv.mlir | 49 ++++++++++---------
mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir | 10 ++--
14 files changed, 117 insertions(+), 183 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
index ce17ad9362227..15def695f6a54 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
@@ -197,14 +197,6 @@ def Tosa_PadOpQuantInfoBuilder : OpBuilder<
input, paddings);
}]>;
-def Tosa_ExplicitValuePadOpQuantInfoBuilder : OpBuilder<
- (ins "Type":$outputType, "Value":$input, "Value":$paddings,
- "Value":$pad_value),
- [{
- buildExplicitValuePadOpWithQuantInfo($_builder, $_state, outputType,
- input, paddings, pad_value);
- }]>;
-
// Wrapper over base I32EnumAttr to set common fields.
class Tosa_I32Enum<string name, string description, list<I32EnumAttrCase> cases>
: I32EnumAttr<name, description, cases> {
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
index 344a54f0bb1c9..f0797f97fd842 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
@@ -168,6 +168,10 @@ namespace tosa {
std::optional<Value> createZeroPointTensor(OpBuilder &builder, Location loc,
Type srcElemType, int64_t zp = 0);
+// Create a pad-const const tensor with value of `val` of required data-type
+std::optional<Value> createPadConstTensor(OpBuilder &builder, Location loc,
+ Value src, int32_t val = 0);
+
} // namespace tosa
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index abdd8347cb2b5..aedea883396f8 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -1882,8 +1882,7 @@ def Tosa_PadOp : Tosa_InferShapedTypeOp<"pad"> {
let arguments = (ins
Tosa_RankedTensor:$input1,
Tosa_Shape:$padding,
- Optional<Tosa_ScalarTensor>:$pad_const,
- OptionalAttr<I32Attr>:$input_zp
+ Tosa_ScalarTensor:$pad_const
);
let results = (outs
@@ -1895,10 +1894,8 @@ def Tosa_PadOp : Tosa_InferShapedTypeOp<"pad"> {
Extension<[Tosa_EXT_FP8E4M3, Tosa_EXT_FP8E5M2, Tosa_EXT_BF16]>,
];
- let builders = [Tosa_PadOpQuantInfoBuilder,
- Tosa_ExplicitValuePadOpQuantInfoBuilder];
+ let builders = [Tosa_PadOpQuantInfoBuilder];
- let hasCanonicalizer = 1;
let hasFolder = 1;
let hasVerifier = 1;
}
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 7f029d56e2582..6a65904272991 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -350,29 +350,12 @@ class PadConverter : public OpConversionPattern<tosa::PadOp> {
}
ShapedType inputTy = cast<ShapedType>(input.getType());
- Type elementTy = inputTy.getElementType();
int64_t rank = inputTy.getRank();
// Setup the default constantAttr.
- Value padConstant;
-
- if (padOp.getPadConst()) {
- padConstant = rewriter.createOrFold<tensor::ExtractOp>(
- loc, padOp.getPadConst(), ValueRange({}));
- } else {
- TypedAttr constantAttr;
- if (isa<FloatType>(elementTy)) {
- constantAttr = rewriter.getFloatAttr(elementTy, 0.0);
- } else if (isa<IntegerType>(elementTy) && !padOp.getInputZpAttr()) {
- constantAttr = rewriter.getIntegerAttr(elementTy, 0);
- } else if (isa<IntegerType>(elementTy) && padOp.getInputZpAttr()) {
- int64_t value = padOp.getInputZpAttr().getInt();
- constantAttr = rewriter.getIntegerAttr(elementTy, value);
- }
- if (constantAttr)
- padConstant = rewriter.create<arith::ConstantOp>(loc, constantAttr);
- }
+ Value padConstant = rewriter.createOrFold<tensor::ExtractOp>(
+ loc, padOp.getPadConst(), ValueRange({}));
if (!padConstant) {
return rewriter.notifyMatchFailure(
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
index 363b5958bc0fd..2c0376134b599 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -175,53 +175,6 @@ void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
results.add<ConsolidateTransposeOptimization, TransposeIsReshape>(context);
}
-struct MaterializePadValue : public OpRewritePattern<tosa::PadOp> {
- using OpRewritePattern::OpRewritePattern;
-
- LogicalResult matchAndRewrite(tosa::PadOp op,
- PatternRewriter &rewriter) const override {
- if (op.getPadConst())
- return failure();
-
- auto input = op.getInput1();
- auto padding = op.getPadding();
-
- ShapedType inputTy = llvm::cast<ShapedType>(input.getType());
- Type elementTy = inputTy.getElementType();
-
- Attribute constantAttr;
- if (llvm::isa<FloatType>(elementTy)) {
- constantAttr = rewriter.getFloatAttr(elementTy, 0.0);
- } else if (llvm::isa<IntegerType>(elementTy) && !op.getInputZpAttr()) {
- constantAttr = rewriter.getIntegerAttr(elementTy, 0);
- } else if (llvm::isa<IntegerType>(elementTy) && op.getInputZpAttr()) {
- int64_t value = op.getInputZpAttr().getInt();
- constantAttr = rewriter.getIntegerAttr(elementTy, value);
- }
-
- if (!constantAttr) {
- return rewriter.notifyMatchFailure(
- op,
- "tosa.pad to linalg lowering encountered an unknown element type");
- }
-
- auto denseAttr = DenseElementsAttr::get(
- RankedTensorType::get({1}, elementTy), constantAttr);
- auto constantVal = rewriter.create<tosa::ConstOp>(
- op.getLoc(), denseAttr.getType(), denseAttr);
-
- rewriter.replaceOpWithNewOp<tosa::PadOp>(
- op, op.getType(), ValueRange{input, padding, constantVal},
- op->getAttrs());
- return success();
- }
-};
-
-void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
- MLIRContext *context) {
- results.add<MaterializePadValue>(context);
-}
-
struct MaxPool2dIsNoOp : public OpRewritePattern<tosa::MaxPool2dOp> {
using OpRewritePattern::OpRewritePattern;
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 54f9fa917f2e0..a76a687c3f1eb 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -214,6 +214,23 @@ void mlir::tosa::printTypeOrAttr(OpAsmPrinter &p, Operation *op, TypeAttr type,
}
}
+// Create a pad-const const tensor with value of `val` of required data-type
+std::optional<Value> mlir::tosa::createPadConstTensor(OpBuilder &builder,
+ Location loc, Value src,
+ int32_t val) {
+ auto const srcType = getElementTypeOrSelf(src);
+ auto const srcElemType = getElementTypeOrSelf(src);
+ auto const padConstType = mlir::RankedTensorType::get({1}, srcType);
+ auto const padConstEType = mlir::RankedTensorType::get({1}, srcElemType);
+ auto const pad_const_attr{
+ llvm::isa<FloatType>(srcElemType)
+ ? DenseElementsAttr::get(padConstEType,
+ builder.getFloatAttr(srcElemType, val))
+ : DenseElementsAttr::get(padConstEType,
+ builder.getIntegerAttr(srcElemType, val))};
+ return builder.create<tosa::ConstOp>(loc, padConstType, pad_const_attr);
+}
+
//===----------------------------------------------------------------------===//
// Tosa utilities.
//===----------------------------------------------------------------------===//
@@ -679,30 +696,14 @@ static void buildUnaryOpWithQuantInfo(OpBuilder &builder,
static void buildPadOpWithQuantInfo(OpBuilder &builder, OperationState &result,
Type outputType, Value input,
Value paddings) {
- result.addOperands({input, paddings});
- auto quantAttr = buildPadOpQuantizationAttr(builder, input);
+ const Location loc{result.location};
+ int32_t zp{0};
+ auto const quantAttr = buildPadOpQuantizationAttr(builder, input);
if (quantAttr) {
- result.addAttribute("input_zp",
- builder.getI32IntegerAttr(
- static_cast<int32_t>(quantAttr.getInputZp())));
- }
- result.types.push_back(outputType);
-}
-
-/// This builder is called on TOSA pad operator when an explicit pad_const
-/// value is passed in. It also optionally constructs quantization_attr.
-static void buildExplicitValuePadOpWithQuantInfo(OpBuilder &builder,
- OperationState &result,
- Type outputType, Value input,
- Value paddings,
- Value padConst) {
- result.addOperands({input, paddings, padConst});
- auto quantAttr = buildPadOpQuantizationAttr(builder, input);
- if (quantAttr) {
- result.addAttribute("input_zp",
- builder.getI32IntegerAttr(
- static_cast<int32_t>(quantAttr.getInputZp())));
+ zp = static_cast<int32_t>(quantAttr.getInputZp());
}
+ auto const pad_const_op{createPadConstTensor(builder, loc, input, zp)};
+ result.addOperands({input, paddings, pad_const_op.value()});
result.types.push_back(outputType);
}
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
index 83bdbce5d1857..b629c3e7df510 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -148,16 +148,16 @@ class TransposeConvStridedConverter
return rewriter.notifyMatchFailure(
op, "zero point must be zero for non-int8 integer types");
- if (weightZpVal != 0) {
- weight = CreateOpAndInferShape<tosa::PadOp>(
- rewriter, loc, UnrankedTensorType::get(weightETy), weight,
- weightPaddingVal, nullptr, rewriter.getI32IntegerAttr(weightZpVal));
-
- } else {
- weight = CreateOpAndInferShape<tosa::PadOp>(
- rewriter, loc, UnrankedTensorType::get(weightETy), weight,
- weightPaddingVal);
- }
+ // construct pad_const values from zp values
+ ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
+ Value const inputPadConst =
+ createPadConstTensor(builder, op->getLoc(), input, inputZpVal).value();
+ Value const weightPadConst =
+ createPadConstTensor(builder, op->getLoc(), input, weightZpVal).value();
+
+ weight = CreateOpAndInferShape<tosa::PadOp>(
+ rewriter, loc, UnrankedTensorType::get(weightETy), weight,
+ weightPaddingVal, weightPadConst);
weightTy = cast<ShapedType>(weight.getType());
weightHeight = weightTy.getDimSize(1);
@@ -169,7 +169,7 @@ class TransposeConvStridedConverter
stride[0], weightWidth / stride[1],
stride[1], inputChannels};
- ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
+
weight = CreateOpAndInferShape<tosa::ReshapeOp>(
builder, UnrankedTensorType::get(weightETy), weight,
getTosaConstShape(rewriter, loc, weightReshapeDims0));
@@ -206,15 +206,9 @@ class TransposeConvStridedConverter
Value inputPaddingVal =
getTosaConstShape(rewriter, op->getLoc(), inputPadding);
- if (inputZpVal != 0) {
- input = CreateOpAndInferShape<tosa::PadOp>(
- rewriter, loc, UnrankedTensorType::get(inputETy), input,
- inputPaddingVal, nullptr, rewriter.getI32IntegerAttr(inputZpVal));
- } else {
- input = CreateOpAndInferShape<tosa::PadOp>(
- rewriter, loc, UnrankedTensorType::get(inputETy), input,
- inputPaddingVal);
- }
+ input = CreateOpAndInferShape<tosa::PadOp>(
+ rewriter, loc, UnrankedTensorType::get(inputETy), input,
+ inputPaddingVal, inputPadConst);
// We use a zero bias as we need to broadcast the bias.
auto zeroBias = rewriter.create<tosa::ConstOp>(
diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
index 6b7f622d3303f..c7a689f5a9ae9 100644
--- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
+++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
@@ -498,35 +498,38 @@ func.func @slice_dyn(%arg0: tensor<?xf32>) -> (tensor<?xf32>) {
// CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]:
func.func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
%0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
// CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
// CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
// CHECK-DAG: [[INDEX3:%.+]] = arith.constant 3 : index
// CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
- // CHECK-DAG: [[CST:%.+]] = arith.constant 0.000000e+00 : f32
+ // CHECK-DAG: [[CST:%.+]] = arith.constant 3.140000e+00 : f32
// CHECK: tensor.pad %[[ARG0]] low{{\[}}[[INDEX1]], [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
// CHECK: tensor.yield [[CST]]
// CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
- %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, !tosa.shape<4>) -> (tensor<4x9xf32>)
+ %1 = "tosa.pad"(%arg0, %0, %pad_const) : (tensor<1x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> (tensor<4x9xf32>)
return %1 : tensor<4x9xf32>
}
// -----
func.func @pad_int(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
%0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
- // CHECK: [[CST:%.+]] = arith.constant 0 : i32
+ %pad_const = "tosa.const"() {value = dense<3> : tensor<1xi32>} : () -> tensor<1xi32>
+ // CHECK: [[CST:%.+]] = arith.constant 3 : i32
// CHECK: tensor.pad
// CHECK: tensor.yield [[CST]]
- %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xi32>, !tosa.shape<4>) -> (tensor<4x9xi32>)
+ %1 = "tosa.pad"(%arg0, %0, %pad_const) : (tensor<1x2xi32>, !tosa.shape<4>, tensor<1xi32>) -> (tensor<4x9xi32>)
return %1 : tensor<4x9xi32>
}
// -----
func.func @pad_quant(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
%0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
- // CHECK: [[CST:%.+]] = arith.constant 42 : i32
+ %pad_const = "tosa.const"() {value = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
+ // CHECK: [[CST:%.+]] = arith.constant 0 : i32
// CHECK: tensor.pad
// CHECK: tensor.yield [[CST]]
- %1 = "tosa.pad"(%arg0, %0) {input_zp = 42 : i32} : (tensor<1x2xi32>, !tosa.shape<4>) -> (tensor<4x9xi32>)
+ %1 = "tosa.pad"(%arg0, %0, %pad_const) {input_zp = 42 : i32} : (tensor<1x2xi32>, !tosa.shape<4>, tensor<1xi32>) -> (tensor<4x9xi32>)
return %1 : tensor<4x9xi32>
}
@@ -551,30 +554,32 @@ func.func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
func.func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
%0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
// CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
// CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
// CHECK-DAG: [[INDEX3:%.+]] = arith.constant 3 : index
// CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
- // CHECK-DAG: [[CST:%.+]] = arith.constant 0.000000e+00 : f32
+ // CHECK-DAG: [[CST:%.+]] = arith.constant 3.140000e+00 : f32
// CHECK: tensor.pad %[[ARG0]] low{{\[}}[[INDEX1]], [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
// CHECK: tensor.yield [[CST]]
// CHECK: } : tensor<?x2xf32> to tensor<?x9xf32>
- %1 = "tosa.pad"(%arg0, %0) : (tensor<?x2xf32>, !tosa.shape<4>) -> (tensor<?x9xf32>)
+ %1 = "tosa.pad"(%arg0, %0, %pad_const) : (tensor<?x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> (tensor<?x9xf32>)
return %1 : tensor<?x9xf32>
}
// -----
func.func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
%0 = tosa.const_shape {value = dense<[-1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
// CHECK-DAG: [[INDEX1:%.+]] = arith.constant -1 : index
// CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
// CHECK-DAG: [[INDEX3:%.+]] = arith.constant 3 : index
// CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
- // CHECK-DAG: [[CST:%.+]] = arith.constant 0.000000e+00 : f32
+ // CHECK-DAG: [[CST:%.+]] = arith.constant 3.140000e+00 : f32
// CHECK: tensor.pad %[[ARG0]] low{{\[}}[[INDEX1]], [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
// CHECK: tensor.yield [[CST]]
// CHECK: } : tensor<1x2xf32> to tensor<?x9xf32>
- %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, !tosa.shape<4>) -> (tensor<?x9xf32>)
+ %1 = "tosa.pad"(%arg0, %0, %pad_const) : (tensor<1x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> (tensor<?x9xf32>)
return %1 : tensor<?x9xf32>
}
diff --git a/mlir/test/Dialect/Tosa/availability.mlir b/mlir/test/Dialect/Tosa/availability.mlir
index 7324b0ea52e89..4203132e9f702 100644
--- a/mlir/test/Dialect/Tosa/availability.mlir
+++ b/mlir/test/Dialect/Tosa/availability.mlir
@@ -512,9 +512,10 @@ func.func @test_concat(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -
// CHECK-LABEL: pad
func.func @test_pad(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
%padding = tosa.const_shape {value = dense<0> : tensor<6xindex>} : () -> !tosa.shape<6>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
// CHECK: profiles: [ [pro_int, pro_fp] ]
// CHECK: extensions: [ [fp8e4m3, fp8e5m2, bf16] ]
- %0 = tosa.pad %arg0, %padding : (tensor<13x21x3xf32>, !tosa.shape<6>) -> tensor<13x21x3xf32>
+ %0 = tosa.pad %arg0, %padding, %pad_const : (tensor<13x21x3xf32>, !tosa.shape<6>, tensor<1xf32>) -> tensor<13x21x3xf32>
return %0 : tensor<13x21x3xf32>
}
diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir
index 175145f332f8e..f7874aaebee21 100644
--- a/mlir/test/Dialect/Tosa/canonicalize.mlir
+++ b/mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -258,7 +258,8 @@ func.func @max_pool2d_is_noop(%arg0: tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf3
func.func @pad_noop(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: return %arg0
%0 = tosa.const_shape { value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4>
- %1 = tosa.pad %arg0, %0 : (tensor<?x?xf32>, !tosa.shape<4>) -> tensor<?x?xf32>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+ %1 = tosa.pad %arg0, %0, %pad_const : (tensor<?x?xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x?xf32>
return %1 : tensor<?x?xf32>
}
@@ -269,7 +270,8 @@ func.func @pad_noop_padding_mismatch_nofold(%arg0: tensor<?x?xf32>) -> tensor<?x
// CHECK: %[[PAD:.+]] = tosa.pad
// CHECK: return %[[PAD]]
%shape = tosa.const_shape { value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
- %1 = tosa.pad %arg0, %shape : (tensor<?x?xf32>, !tosa.shape<4>) -> tensor<?x?xf32>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+ %1 = tosa.pad %arg0, %shape, %pad_const : (tensor<?x?xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x?xf32>
return %1 : tensor<?x?xf32>
}
@@ -280,7 +282,8 @@ func.func @pad_noop_type_mismatch_nofold(%arg0: tensor<10xf32>) -> tensor<?xf32>
// CHECK: %[[PAD:.+]] = tosa.pad
// CHECK: return %[[PAD]]
%shape = tosa.const_shape { value = dense<[1, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
- %0 = tosa.pad %arg0, %shape : (tensor<10xf32>, !tosa.shape<2>) -> tensor<?xf32>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+ %0 = tosa.pad %arg0, %shape, %pad_const : (tensor<10xf32>, !tosa.shape<2>, tensor<1xf32>) -> tensor<?xf32>
return %0 : tensor<?xf32>
}
@@ -291,8 +294,9 @@ func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>
// CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor<1xi32>}
// CHECK-DAG: %[[PADDING:.+]] = tosa.const_shape {value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
// CHECK: tosa.pad %arg0, %[[PADDING]], %[[ZERO]]
+ %pad_const = "tosa.const"() {value = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
%0 = tosa.const_shape { value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
- %1 = tosa.pad %arg0, %0 : (tensor<?x?xi32>, !tosa.shape<4>) -> tensor<?x?xi32>
+ %1 = tosa.pad %arg0, %0, %pad_const : (tensor<?x?xi32>, !tosa.shape<4>, tensor<1xi32>) -> tensor<?x?xi32>
return %1 : tensor<?x?xi32>
}
@@ -300,11 +304,12 @@ func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>
// CHECK-LABEL: @pad_determine_val_f32
func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xf32> {
- // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<1xf32>}
+ // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<3.140000e+00> : tensor<1xf32>}
// CHECK-DAG: %[[PADDING:.+]] = tosa.const_shape {value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
// CHECK: tosa.pad %arg0, %[[PADDING]], %[[ZERO]]
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
%0 = tosa.const_shape { value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
- %1 = tosa.pad %arg0, %0 : (tensor<?x?xf32>, !tosa.shape<4>) -> tensor<?x?xf32>
+ %1 = tosa.pad %arg0, %0, %pad_const : (tensor<?x?xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x?xf32>
return %1 : tensor<?x?xf32>
}
@@ -312,11 +317,12 @@ func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>, %arg1 : tensor<2x2xi32>
// CHECK-LABEL: @pad_determine_val_quant
func.func @pad_determine_val_quant(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xi32> {
- // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<42> : tensor<1xi32>}
+ // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<3> : tensor<1xi32>}
// CHECK-DAG: %[[PADDING:.+]] = tosa.const_shape {value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
// CHECK: tosa.pad %arg0, %[[PADDING]], %[[ZERO]]
+ %pad_const = "tosa.const"() {value = dense<3> : tensor<1xi32>} : () -> tensor<1xi32>
%0 = tosa.const_shape { value = dense<[1, 0, 0, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
- %1 = tosa.pad %arg0, %0 {input_zp = 42 : i32} : (tensor<?x?xi32>, !tosa.shape<4>) -> tensor<?x?xi32>
+ %1 = tosa.pad %arg0, %0, %pad_const {input_zp = 42 : i32} : (tensor<?x?xi32>, !tosa.shape<4>, tensor<1xi32>) -> tensor<?x?xi32>
return %1 : tensor<?x?xi32>
}
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
index 967db6617e29e..87478ab11f9d2 100644
--- a/mlir/test/Dialect/Tosa/invalid.mlir
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -211,8 +211,9 @@ func.func @test_concat_element_type_mismatch(%arg0 : tensor<1x2xf32>, %arg1 : te
// -----
func.func @test_pad_non_const(%arg0: tensor<13x21x3xf32>, %arg1: !tosa.shape<6>) -> tensor<13x21x3xf32> {
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
// expected-error at +1 {{'tosa.pad' op shape operand is not compile time resolvable}}
- %0 = tosa.pad %arg0, %arg1 : (tensor<13x21x3xf32>, !tosa.shape<6>) -> tensor<13x21x3xf32>
+ %0 = tosa.pad %arg0, %arg1, %pad_const : (tensor<13x21x3xf32>, !tosa.shape<6>, tensor<1xf32>) -> tensor<13x21x3xf32>
return %0 : tensor<13x21x3xf32>
}
@@ -228,18 +229,19 @@ func.func @test_pad_non_const(%arg0: tensor<13x21x3xi8>, %arg1: tensor<1xi8>) ->
// -----
func.func @test_pad_io_rank_mismatch(%arg0: tensor<13x21xf32>) {
- %padding = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4>
+ %0 = tosa.const_shape {value = dense<1> : tensor<4xindex>} : () -> !tosa.shape<4>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
// expected-error at +1 {{'tosa.pad' op expect same input and output tensor rank.}}
- %1 = tosa.pad %arg0, %padding : (tensor<13x21xf32>, !tosa.shape<4>) -> tensor<13x21x3xf32>
- return
+ %1 = tosa.pad %arg0, %0, %pad_const : (tensor<13x21xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<13x21x3xf32>
}
// -----
func.func @test_pad_invalid_padding_rank(%arg0: tensor<13x21xf32>) {
%0 = tosa.const_shape {value = dense<1> : tensor<6xindex>} : () -> !tosa.shape<6>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
// expected-error at +1 {{'tosa.pad' op expected padding tensor dim 0 to have size 4 (2*rank(shape1)) but got size 6}}
- %1 = tosa.pad %arg0, %0 : (tensor<13x21xf32>, !tosa.shape<6>) -> tensor<13x21xf32>
+ %1 = tosa.pad %arg0, %0, %pad_const : (tensor<13x21xf32>, !tosa.shape<6>, tensor<1xf32>) -> tensor<13x21xf32>
return
}
@@ -256,9 +258,10 @@ func.func @test_pad_invalid_padConst_rank(%arg0: tensor<13x21xf32>, %arg1: tenso
// -----
func.func @test_pad_padding_shape_mismatch(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
- %0 = tosa.const_shape {value = dense<1> : tensor<4xindex>} : () -> !tosa.shape<4>
+ %0 = tosa.const_shape {value = dense<1> : tensor<4xindex>} : () -> !tosa.shape<4>
+ %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
// expected-error at +1 {{'tosa.pad' op expected padding tensor dim 0 to have size 6 (2*rank(shape1)) but got size 4}}
- %1 = tosa.pad %arg0, %0 : (tensor<13x21x3xf32>, !tosa.shape<4>) -> tensor<13x21x3xf32>
+ %1 = tosa.pad %arg0, %0, %pad_const : (tensor<13x21x3xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<13x21x3xf32>
return %1 : tensor<13x21x3xf32>
}
diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir
index cb45c4465cde6..fc6d3a47c85a4 100644
--- a/mlir/test/Dialect/Tosa/ops.mlir
+++ b/mlir/test/Dialect/Tosa/ops.mlir
@@ -580,14 +580,6 @@ func.func @test_concat(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -
return %0 : tensor<26x21x3xf32>
}
-// -----
-// CHECK-LABEL: pad
-func.func @test_pad(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
- %padding = tosa.const_shape {value = dense<0> : tensor<6xindex>} : () -> !tosa.shape<6>
- %0 = tosa.pad %arg0, %padding : (tensor<13x21x3xf32>, !tosa.shape<6>) -> tensor<13x21x3xf32>
- return %0 : tensor<13x21x3xf32>
-}
-
// -----
// CHECK-LABEL: pad_explicit_value
func.func @test_pad_explicit_value(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
index 0167bf10ed0ae..0a41931b24523 100644
--- a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
@@ -93,8 +93,10 @@ func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor<
func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1: tensor<5x3x5x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x35x47x5xi32>) {
// Manipulate the weight matrix to handle striding.
+ // CHECK-DAG: %[[INPUT_ZP:.+]] = "tosa.const"() <{value = dense<-22> : tensor<1xi8>}> : () -> tensor<1xi8>
+ // CHECK-DAG: %[[WEIGHT_ZP:.+]] = "tosa.const"() <{value = dense<42> : tensor<1xi8>}> : () -> tensor<1xi8>
// CHECK-DAG: %[[PADV:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 1, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
- // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]] {input_zp = 42 : i32}
+ // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]], %[[WEIGHT_ZP]]
// CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[5, 2, 2, 2, 3, 3]> : tensor<6xindex>}
// CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]], %[[CONST1]]
// CHECK-DAG: %[[TRANS:.+]] = tosa.transpose %[[RESW1]] {perms = array<i32: 2, 4, 0, 1, 3, 5>}
@@ -102,30 +104,28 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1
// CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]], %[[CONST3]]
// CHECK-DAG: %[[REV1:.+]] = tosa.reverse %[[RESW2]] {axis = 1 : i32}
// CHECK-DAG: %[[NEWWEIGHT:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i32}
- // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} : () -> !tosa.shape<4>
- // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4>
// Pad out the input matrix to handle the transpose conv.
// CHECK-DAG: %[[PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
- // CHECK-DAG: %[[NEWINPUT:.+]] = tosa.pad %arg0, %[[PAD]] {input_zp = -22 : i32}
+ // CHECK-DAG: %[[NEWINPUT:.+]] = tosa.pad %arg0, %[[PAD]], %[[INPUT_ZP]]
// Manipulate the final shape.
// CHECK-DAG: %[[BIAS:.+]] = "tosa.const"() <{value = dense<0> : tensor<30xi32>}
- // CHECK-DAG: %[[INPUT_ZP:.+]] = "tosa.const"() <{value = dense<-22> : tensor<1xi8>}
- // CHECK-DAG: %[[WEIGHT_ZP:.+]] = "tosa.const"() <{value = dense<42> : tensor<1xi8>}
// CHECK-DAG: %[[CONV:.+]] = tosa.conv2d %[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]], %[[INPUT_ZP]], %[[WEIGHT_ZP]] {acc_type = i32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>}
- // CHECK-DAG: %[[CONV_NEW_SHAPE:.*]] = tosa.const_shape {value = dense<[2, 18, 16, 2, 3, 5]> : tensor<6xindex>}
- // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]], %[[CONV_NEW_SHAPE]]
+ // CHECK-DAG: %[[CONST6:.+]] = tosa.const_shape {value = dense<[2, 18, 16, 2, 3, 5]> : tensor<6xindex>}
+ // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]], %[[CONST6]]
// CHECK-DAG: %[[TRANS_OUT:.+]] = tosa.transpose %[[RESHAPE_OUT_1]] {perms = array<i32: 0, 1, 3, 2, 4, 5>}
- // CHECK-DAG: %[[TRANS_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[2, 36, 48, 5]> : tensor<4xindex>}
- // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]], %[[TRANS_NEW_SHAPE]]
- // CHECK-DAG: %[[SLICE:.+]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]]
- // CHECK-DAG: %[[ARG2_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 5]> : tensor<4xindex>}
- // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[ARG2_NEW_SHAPE]]
+ // CHECK-DAG: %[[CONST8:.+]] = tosa.const_shape {value = dense<[2, 36, 48, 5]> : tensor<4xindex>}
+ // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]], %[[CONST8]]
+ // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>}
+ // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>}
+ // CHECK-DAG: %[[SLICE:.*]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]]
+ // CHECK-DAG: %[[CONST9:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 5]> : tensor<4xindex>}
+ // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[CONST9]]
// CHECK: %[[ADD:.+]] = tosa.add %[[SLICE]], %[[RESHAPE_ARG2]]
- %input_zp = "tosa.const"() {value = dense<-22> : tensor<1xi8>} : () -> tensor<1xi8>
- %weight_zp = "tosa.const"() {value = dense<42> : tensor<1xi8>} : () -> tensor<1xi8>
- %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = i32, out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: -1, -1, -1, -1>, stride = array<i64: 2, 3>} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>, tensor<1xi8>, tensor<1xi8>) -> tensor<2x35x47x5xi32>
+ %input_zp = "tosa.const"() <{value = dense<-22> : tensor<1xi8>}> : () -> tensor<1xi8>
+ %weight_zp = "tosa.const"() <{value = dense<42> : tensor<1xi8>}> : () -> tensor<1xi8>
+ %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = i32, out_pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 3>} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>, tensor<1xi8>, tensor<1xi8>) -> tensor<2x35x47x5xi32>
return %0 : tensor<2x35x47x5xi32>
}
@@ -133,24 +133,25 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1
// CHECK-LABEL: @transpose_conv2d_strided_overpad
func.func @transpose_conv2d_strided_overpad(%arg0 : tensor<1x16x1x1xi8>, %arg1 : tensor<1x2x1x1xi8>, %arg2 : tensor<1xi32>) -> (tensor<1x19x2x1xi32>) {
- // CHECK-DAG: %[[WEIGHT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 0, 0, 1, 0, 0]> : tensor<8xindex>}
+ // CHECK-DAG: %[[WEIGHT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 0, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
// CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[1, 2, 1, 1, 2, 1]> : tensor<6xindex>}
+ // CHECK-DAG: %[[INPUT_ZP:.+]] = "tosa.const"() <{value = dense<-103> : tensor<1xi8>}> : () -> tensor<1xi8>
+ // CHECK-DAG: %[[WEIGHT_ZP:.+]] = "tosa.const"() <{value = dense<93> : tensor<1xi8>}> : () -> tensor<1xi8>
// CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[2, 2, 1, 1]> : tensor<4xindex>}
- // CHECK-DAG: %[[INPUT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 0, 0, 0, 0]> : tensor<8xindex>}
+ // CHECK-DAG: %[[INPUT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 0, 0, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
// CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor<2xi32>}
// CHECK-DAG: %[[CONST6:.+]] = tosa.const_shape {value = dense<[1, 17, 1, 1, 2, 1]> : tensor<6xindex>}
// CHECK-DAG: %[[CONST8:.+]] = tosa.const_shape {value = dense<[1, 17, 2, 1]> : tensor<4xindex>}
- // CHECK-DAG: %[[RESULT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 2, 0, 0, 0, 0, 0]> : tensor<8xindex>}
+ // CHECK-DAG: %[[RESULT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 2, 0, 0, 0, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
// CHECK-DAG: %[[CONST10:.+]] = tosa.const_shape {value = dense<1> : tensor<4xindex>}
- // CHECK-DAG: %[[INPUT_ZP:.*]] = "tosa.const"() <{value = dense<-103> : tensor<1xi8>}>
- // CHECK-DAG: %[[WEIGHT_ZP:.*]] = "tosa.const"() <{value = dense<93> : tensor<1xi8>}>
- // CHECK: %[[PAD_WEIGHT:.+]] = tosa.pad %arg1, %[[WEIGHT_PAD]] {input_zp = 93 : i32}
+ // CHECK: %[[PAD_WEIGHT:.+]] = tosa.pad %arg1, %[[WEIGHT_PAD]], %[[WEIGHT_ZP]]
// CHECK: %[[RESHAPE_WEIGHT_0:.+]] = tosa.reshape %[[PAD_WEIGHT]], %[[CONST1]]
// CHECK: %[[TRANSPOSE_WEIGHT:.+]] = tosa.transpose %[[RESHAPE_WEIGHT_0]] {perms = array<i32: 2, 4, 0, 1, 3, 5>}
// CHECK: %[[RESHAPE_WEIGHT_1:.+]] = tosa.reshape %[[TRANSPOSE_WEIGHT]], %[[CONST3]]
// CHECK: %[[REVERSE:.+]] = tosa.reverse %[[RESHAPE_WEIGHT_1]] {axis = 1 : i32}
- // CHECK: %[[PAD_INPUT:.+]] = tosa.pad %arg0, %[[INPUT_PAD]] {input_zp = -103 : i32}
- // CHECK: %[[CONV:.+]] = tosa.conv2d %[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]], %[[INPUT_ZP]], %[[WEIGHT_ZP]] {acc_type = i32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>}
+ // CHECK: %[[PAD_INPUT:.+]] = tosa.pad %arg0, %[[INPUT_PAD]], %[[INPUT_ZP]]
+ // CHECK: %[[CONV:.+]] = tosa.conv2d %[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]], %[[INPUT_ZP]], %[[WEIGHT_ZP]]
+ // CHECK-SAME{literal}: dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
// CHECK: %[[RESHAPE_RESULT_0:.+]] = tosa.reshape %[[CONV]], %[[CONST6]]
// CHECK: %[[TRANSPOSE_RESULT:.+]] = tosa.transpose %[[RESHAPE_RESULT_0]] {perms = array<i32: 0, 1, 3, 2, 4, 5>}
// CHECK: %[[RESHAPE_RESULT_1:.+]] = tosa.reshape %[[TRANSPOSE_RESULT]], %[[CONST8]]
diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
index 8a3dbfe17d686..75f91748cfaee 100644
--- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
@@ -469,8 +469,9 @@ func.func @test_concat_axis_1(%arg0 : tensor<2x1xf32>, %arg1 : tensor<2x2xf32>)
// CHECK-LABEL:@test_padding_dynamic_input
func.func @test_padding_dynamic_input(%arg0 : tensor<1x?xf32>) -> () {
%0 = tosa.const_shape { value = dense<[1, 2, 3, 4]> : tensor<4xindex> } : () -> !tosa.shape<4>
- // CHECK: tosa.pad %arg0, %0 : (tensor<1x?xf32>, !tosa.shape<4>) -> tensor<4x?xf32>
- %1 = tosa.pad %arg0, %0 : (tensor<1x?xf32>, !tosa.shape<4>) -> tensor<?x?xf32>
+ %1 = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+ // CHECK: tosa.pad %arg0, %0, %1 : (tensor<1x?xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<4x?xf32>
+ %2 = tosa.pad %arg0, %0, %1 : (tensor<1x?xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x?xf32>
return
}
@@ -479,8 +480,9 @@ func.func @test_padding_dynamic_input(%arg0 : tensor<1x?xf32>) -> () {
// CHECK-LABEL: @test_padding_simple
func.func @test_padding_simple(%arg0 : tensor<1x2xf32>) -> () {
%0 = tosa.const_shape { value = dense<[1, 2, 3, 4]> : tensor<4xindex> } : () -> !tosa.shape<4>
- // CHECK: tosa.pad %arg0, %0 : (tensor<1x2xf32>, !tosa.shape<4>) -> tensor<4x9xf32>
- %1 = tosa.pad %arg0, %0 : (tensor<1x2xf32>, !tosa.shape<4>) -> tensor<?x?xf32>
+ %1 = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+ // CHECK: tosa.pad %arg0, %0, %1 : (tensor<1x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<4x9xf32>
+ %2 = tosa.pad %arg0, %0, %1 : (tensor<1x2xf32>, !tosa.shape<4>, tensor<1xf32>) -> tensor<?x?xf32>
return
}