[Mlir-commits] [mlir] f5f7e2a - [mlir][tosa] Constant optimizations for reduce operations
Maya Amrami
llvmlistbot at llvm.org
Thu Sep 21 09:19:57 PDT 2023
Author: Amir Bishara
Date: 2023-09-21T19:19:50+03:00
New Revision: f5f7e2a336ce657c20859295bbf542f71cdf4d3e
URL: https://github.com/llvm/llvm-project/commit/f5f7e2a336ce657c20859295bbf542f71cdf4d3e
DIFF: https://github.com/llvm/llvm-project/commit/f5f7e2a336ce657c20859295bbf542f71cdf4d3e.diff
LOG: [mlir][tosa] Constant optimizations for reduce operations
Replace the different reduce operations that receive
a constant tensor as an input argument with a constant
tensor.
As the argument of the reduce operation is a constant tensor
and has only a single user, we can calculate the resulting
constant tensor at compile time and replace the operation
with the reduced tensor, saving memory
This optimization has been implemented for:
tosa.reduce_sum
tosa.reduce_prod
tosa.reduce_any
tosa.reduce_all
tosa.reduce_max
tosa.reduce_min
Reviewed By: rsuderman
Differential Revision: https://reviews.llvm.org/D154832
Added:
Modified:
mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
mlir/lib/Dialect/Tosa/Transforms/TosaLayerwiseConstantFoldPass.cpp
mlir/test/Dialect/Tosa/constant-op-fold.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index 83e9d760aa896d6..e7da35a0c8145ac 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -1273,6 +1273,11 @@ def Tosa_ReduceAllOp : Tosa_InferTensorTypeOp<"reduce_all"> {
/// Returns true when two result types are compatible for this op;
/// Method used by InferTypeOpInterface.
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
+
+ /// Return the AND result between two integer operands
+ static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) {
+ return leftOperand & rightOperand;
+ }
}];
}
@@ -1301,6 +1306,11 @@ def Tosa_ReduceAnyOp : Tosa_InferTensorTypeOp<"reduce_any"> {
/// Returns true when two result types are compatible for this op;
/// Method used by InferTypeOpInterface.
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
+
+ /// Return the OR result between two integer operands
+ static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) {
+ return leftOperand | rightOperand;
+ }
}];
}
@@ -1329,6 +1339,12 @@ def Tosa_ReduceMaxOp : Tosa_InferTensorTypeOp<"reduce_max"> {
/// Returns true when two result types are compatible for this op;
/// Method used by InferTypeOpInterface.
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
+
+ /// Return the max of the two integer operands
+ static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) {
+ const llvm::APInt subtractRes = leftOperand - rightOperand;
+ return (!subtractRes.isNegative()) ? leftOperand : rightOperand;
+ }
}];
}
@@ -1357,6 +1373,12 @@ def Tosa_ReduceMinOp : Tosa_InferTensorTypeOp<"reduce_min"> {
/// Returns true when two result types are compatible for this op;
/// Method used by InferTypeOpInterface.
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
+
+ /// Return the min of the two integer operands
+ static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) {
+ const llvm::APInt subtractRes = leftOperand - rightOperand;
+ return (!subtractRes.isNegative()) ? rightOperand : leftOperand;
+ }
}];
}
@@ -1385,6 +1407,11 @@ def Tosa_ReduceProdOp : Tosa_InferTensorTypeOp<"reduce_prod"> {
/// Returns true when two result types are compatible for this op;
/// Method used by InferTypeOpInterface.
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
+
+ /// Return the prod of the two integer operands
+ static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) {
+ return leftOperand * rightOperand;
+ }
}];
}
@@ -1406,13 +1433,17 @@ def Tosa_ReduceSumOp : Tosa_InferTensorTypeOp<"reduce_sum"> {
let results = (outs
Tosa_Tensor:$output
);
-
let hasFolder = 1;
let extraClassDeclaration = [{
/// Returns true when two result types are compatible for this op;
/// Method used by InferTypeOpInterface.
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
+
+ /// Return the sum of the two integer operands
+ static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) {
+ return leftOperand + rightOperand;
+ }
}];
}
diff --git a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
index 72846d5dbe48908..6b5dd9c970703ee 100644
--- a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
@@ -34,6 +34,8 @@ void populateTosaFoldConstantReciprocalPatterns(MLIRContext *ctx,
RewritePatternSet &patterns);
void populateTosaFoldConstantTransposePatterns(MLIRContext *ctx,
RewritePatternSet &patterns);
+void populateTosaConstantReduction(MLIRContext *ctx,
+ RewritePatternSet &patterns);
std::unique_ptr<Pass> createTosaLayerwiseConstantFoldPass();
std::unique_ptr<Pass> createTosaInferShapesPass();
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
index 58693991952a3b3..0988759b82201df 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include <functional>
+#include <numeric>
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/Dialect/Tosa/Transforms/Passes.h"
@@ -289,8 +290,130 @@ struct TosaFoldConstantReciprocal : public OpRewritePattern<ReciprocalOp> {
}
};
+/// Getting the axes position of the element which is located
+/// in the tensor at the counter index
+
+llvm::SmallVector<int64_t>
+getPositionFromIndex(int64_t index, llvm::ArrayRef<int64_t> tensorShape) {
+ int64_t remaining = index;
+ llvm::SmallVector<int64_t> position(tensorShape.size(), 0);
+ for (int64_t i = tensorShape.size() - 1; i >= 0; --i) {
+ position[i] = remaining % tensorShape[i];
+ remaining /= tensorShape[i];
+ }
+ return position;
+}
+
+/// Getting the index of the element which is located at the
+/// axes position in the tensor
+
+int64_t getIndexFromPosition(llvm::ArrayRef<int64_t> position,
+ llvm::ArrayRef<int64_t> tensorShape) {
+ int64_t index = 0;
+ int64_t multiplierTmp = 1;
+ for (int64_t i = position.size() - 1; i >= 0; --i) {
+ index += position[i] * multiplierTmp;
+ multiplierTmp *= tensorShape[i];
+ }
+ return index;
+}
+
+template <typename OperationType>
+llvm::APInt calculateReducedValue(const mlir::ElementsAttr &oldTensorAttr,
+ llvm::ArrayRef<int64_t> oldShape,
+ int64_t reductionAxis,
+ int64_t reductionIndex) {
+
+ llvm::SmallVector<int64_t> newShape(oldShape);
+ newShape[reductionAxis] = 1;
+ /// Let's calculate the position of the index
+ llvm::SmallVector<int64_t> position =
+ getPositionFromIndex(reductionIndex, newShape);
+ auto oldTensor = oldTensorAttr.getValues<llvm::APInt>();
+ /// Starting from the first positon along the reduction axis
+ position[reductionAxis] = 0;
+ int64_t indexAtOldTensor = getIndexFromPosition(position, oldShape);
+ llvm::APInt reducedValue = oldTensor[indexAtOldTensor];
+
+ for (int64_t reductionAxisVal = 1; reductionAxisVal < oldShape[reductionAxis];
+ ++reductionAxisVal) {
+
+ int64_t stride = std::accumulate(oldShape.begin() + reductionAxis + 1,
+ oldShape.end(), 1, std::multiplies<int>());
+ int64_t index = indexAtOldTensor + stride * reductionAxisVal;
+ reducedValue =
+ OperationType::calcOneElement(reducedValue, oldTensor[index]);
+ }
+ return reducedValue;
+}
+
+template <typename OperationType>
+struct ReduceConstantOptimization : public OpRewritePattern<OperationType> {
+
+ using OpRewritePattern<OperationType>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(OperationType op,
+ PatternRewriter &rewriter) const override {
+ Value inputOp = op.getInput();
+ auto constOp = inputOp.getDefiningOp<tosa::ConstOp>();
+
+ if (!constOp)
+ return rewriter.notifyMatchFailure(
+ op, "reduce input must be const operation");
+
+ if (!inputOp.hasOneUse())
+ return rewriter.notifyMatchFailure(
+ op, "input operation has more than one user");
+
+ auto resultType = cast<ShapedType>(op.getOutput().getType());
+
+ if (!resultType.hasStaticShape())
+ return rewriter.notifyMatchFailure(op, "result type shape is not static");
+
+ auto reductionAxis = op.getAxis();
+ const auto denseElementsAttr = constOp.getValue();
+ const auto shapedOldElementsValues =
+ denseElementsAttr.getType().cast<ShapedType>();
+
+ if (!llvm::isa<IntegerType>(shapedOldElementsValues.getElementType()))
+ return rewriter.notifyMatchFailure(
+ op, "reduce input currently supported with integer type");
+
+ auto oldShape = shapedOldElementsValues.getShape();
+ auto newShape = resultType.getShape();
+
+ auto newNumOfElements = std::accumulate(newShape.begin(), newShape.end(), 1,
+ std::multiplies<int>());
+ llvm::SmallVector<APInt> newReducedTensor(newNumOfElements);
+
+ for (int64_t reductionIndex = 0; reductionIndex < newNumOfElements;
+ ++reductionIndex) {
+
+ /// Let's reduce all the elements along this reduction axis
+ newReducedTensor[reductionIndex] = calculateReducedValue<OperationType>(
+ denseElementsAttr, oldShape, reductionAxis, reductionIndex);
+ }
+
+ auto rankedTensorType = cast<RankedTensorType>(resultType);
+ auto denseAttr =
+ mlir::DenseElementsAttr::get(rankedTensorType, newReducedTensor);
+ rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, rankedTensorType, denseAttr);
+ return success();
+ }
+};
+
} // namespace
+void mlir::tosa::populateTosaConstantReduction(MLIRContext *ctx,
+ RewritePatternSet &patterns) {
+ patterns.add<ReduceConstantOptimization<ReduceAllOp>>(ctx);
+ patterns.add<ReduceConstantOptimization<ReduceAnyOp>>(ctx);
+ patterns.add<ReduceConstantOptimization<ReduceMaxOp>>(ctx);
+ patterns.add<ReduceConstantOptimization<ReduceMinOp>>(ctx);
+ patterns.add<ReduceConstantOptimization<ReduceProdOp>>(ctx);
+ patterns.add<ReduceConstantOptimization<ReduceSumOp>>(ctx);
+}
+
void mlir::tosa::populateTosaFoldConstantTransposePatterns(
MLIRContext *ctx, RewritePatternSet &patterns) {
patterns.add<TosaFoldConstantTranspose>(ctx);
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaLayerwiseConstantFoldPass.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaLayerwiseConstantFoldPass.cpp
index 2e2d338abbe4bfd..90f15faf0108103 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaLayerwiseConstantFoldPass.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaLayerwiseConstantFoldPass.cpp
@@ -52,6 +52,7 @@ struct TosaLayerwiseConstantFoldPass
mlir::tosa::populateTosaFoldConstantReciprocalPatterns(ctx, patterns);
mlir::tosa::populateTosaFoldConstantTransposePatterns(ctx, patterns);
+ mlir::tosa::populateTosaConstantReduction(ctx, patterns);
populateTosaOpsCanonicalizationPatterns(ctx, patterns);
if (applyPatternsAndFoldGreedily(func, std::move(patterns)).failed())
diff --git a/mlir/test/Dialect/Tosa/constant-op-fold.mlir b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
index 39132c9744e8f70..e66082d83cb907e 100644
--- a/mlir/test/Dialect/Tosa/constant-op-fold.mlir
+++ b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
@@ -573,3 +573,481 @@ func.func @reverse_length_one(%arg0 : tensor<10x1xi32>) -> (tensor<10x1xi32>, te
// CHECK: return %[[NOFOLD]], %arg0
return %nofold, %fold : tensor<10x1xi32>, tensor<10x1xi32>
}
+
+// -----
+
+ func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+ // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}5, 7, 9]]> : tensor<1x3xi32>}> : () -> tensor<1x3xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x3xi32>
+
+ %const = "tosa.const"() {value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>} : () -> tensor<2x3xi32>
+ %0 = tosa.reduce_sum %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+ return %0 : tensor<1x3xi32>
+ }
+
+// -----
+
+ func.func @reduce_sum_constant() -> tensor<2x1xi32> {
+ // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<2x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}6], [15]]> : tensor<2x1xi32>}> : () -> tensor<2x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+ %0 = tosa.reduce_sum %const {axis = 1 : i32} : (tensor<2x3xi32>) -> tensor<2x1xi32>
+ return %0 : tensor<2x1xi32>
+ }
+
+
+// -----
+
+func.func @reduce_sum_constant() -> tensor<3x1xi32> {
+ // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<3x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}6], [15], [24]]> : tensor<3x1xi32>}> : () -> tensor<3x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<3x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[1, 2, 3], [4, 5, 6], [7, 8, 9]]> : tensor<3x3xi32>}> : () -> tensor<3x3xi32>
+ %0 = tosa.reduce_sum %const {axis = 1 : i32} : (tensor<3x3xi32>) -> tensor<3x1xi32>
+ return %0 : tensor<3x1xi32>
+}
+
+// -----
+
+func.func @reduce_sum_constant() -> tensor<2x1x4xi32> {
+ // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<2x1x4xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[15, 18, 21, 24]], {{\[\[}}51, 54, 57, 60]]]> : tensor<2x1x4xi32>}> : () -> tensor<2x1x4xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x1x4xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]> : tensor<2x3x4xi32>}> : () -> tensor<2x3x4xi32>
+ %0 = tosa.reduce_sum %const {axis = 1 : i32} : (tensor<2x3x4xi32>) -> tensor<2x1x4xi32>
+ return %0 : tensor<2x1x4xi32>
+}
+
+// -----
+
+func.func @reduce_sum_constant() -> tensor<1x3x3xi32> {
+ // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x3x3xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[30, 33, 36], [39, 42, 45], [48, 51, 54]]]> : tensor<1x3x3xi32>}> : () -> tensor<1x3x3xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x3x3xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<3x3x3xi32>}> : () -> tensor<3x3x3xi32>
+ %0 = tosa.reduce_sum %const {axis = 0 : i32} : (tensor<3x3x3xi32>) -> tensor<1x3x3xi32>
+ return %0 : tensor<1x3x3xi32>
+}
+
+// -----
+
+func.func @reduce_sum_constant() -> tensor<2x2x2x1xi32> {
+ // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<2x2x2x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}3], [7]], {{\[\[}}11], [15]]], {{\[\[}}[19], [23]], {{\[\[}}27], [31]]]]> : tensor<2x2x2x1xi32>}> : () -> tensor<2x2x2x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x2x2x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]]> : tensor<2x2x2x2xi32>}> : () -> tensor<2x2x2x2xi32>
+ %0 = tosa.reduce_sum %const {axis = 3 : i32} : (tensor<2x2x2x2xi32>) -> tensor<2x2x2x1xi32>
+ return %0 : tensor<2x2x2x1xi32>
+}
+
+// -----
+
+func.func @reduce_sum_constant() -> tensor<1x1x1xi32> {
+ // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x1x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x1x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[42]]]> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32>
+ %0 = tosa.reduce_sum %const {axis = 0 : i32} : (tensor<1x1x1xi32>) -> tensor<1x1x1xi32>
+ return %0 : tensor<1x1x1xi32>
+}
+
+// -----
+
+func.func @reduce_sum_constant() -> tensor<2x3x1x5xi32> {
+ // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<2x3x1x5xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}34, 38, 42, 46, 50]], {{\[\[}}114, 118, 122, 126, 130]], {{\[\[}}194, 198, 202, 206, 210]]], {{\[\[}}[274, 278, 282, 286, 290]], {{\[\[}}354, 358, 362, 366, 370]], {{\[\[}}434, 438, 442, 446, 450]]]]> : tensor<2x3x1x5xi32>}> : () -> tensor<2x3x1x5xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x3x1x5xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20]], [[21, 22, 23, 24, 25], [26, 27, 28, 29, 30], [31, 32, 33, 34, 35], [36, 37, 38, 39, 40]], [[41, 42, 43, 44, 45], [46, 47, 48, 49, 50], [51, 52, 53, 54, 55], [56, 57, 58, 59, 60]]], [[[61, 62, 63, 64, 65], [66, 67, 68, 69, 70], [71, 72, 73, 74, 75], [76, 77, 78, 79, 80]], [[81, 82, 83, 84, 85], [86, 87, 88, 89, 90], [91, 92, 93, 94, 95], [96, 97, 98, 99, 100]], [[101, 102, 103, 104, 105], [106, 107, 108, 109, 110], [111, 112, 113, 114, 115], [116, 117, 118, 119, 120]]]]> : tensor<2x3x4x5xi32>}> : () -> tensor<2x3x4x5xi32>
+ %0 = tosa.reduce_sum %const {axis = 2 : i32} : (tensor<2x3x4x5xi32>) -> tensor<2x3x1x5xi32>
+ return %0 : tensor<2x3x1x5xi32>
+}
+
+// -----
+
+ func.func @reduce_prod_constant() -> tensor<1x3xi32> {
+ // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<1x3xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}4, 10, 18]]> : tensor<1x3xi32>}> : () -> tensor<1x3xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x3xi32>
+
+ %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+ %0 = tosa.reduce_prod %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+ return %0 : tensor<1x3xi32>
+ }
+
+// -----
+
+ func.func @reduce_prod_constant() -> tensor<2x1xi32> {
+ // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<2x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}6], [120]]> : tensor<2x1xi32>}> : () -> tensor<2x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x1xi32>
+ // CHECK: }
+
+ %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+ %0 = tosa.reduce_prod %const {axis = 1 : i32} : (tensor<2x3xi32>) -> tensor<2x1xi32>
+ return %0 : tensor<2x1xi32>
+ }
+
+// -----
+
+func.func @reduce_prod_constant() -> tensor<3x1xi32> {
+ // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<3x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}6], [120], [504]]> : tensor<3x1xi32>}> : () -> tensor<3x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<3x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[1, 2, 3], [4, 5, 6], [7, 8, 9]]> : tensor<3x3xi32>}> : () -> tensor<3x3xi32>
+ %0 = tosa.reduce_prod %const {axis = 1 : i32} : (tensor<3x3xi32>) -> tensor<3x1xi32>
+ return %0 : tensor<3x1xi32>
+}
+
+// -----
+
+func.func @reduce_prod_constant() -> tensor<2x1x4xi32> {
+ // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<2x1x4xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[45, 120, 231, 384]], {{\[\[}}4641, 5544, 6555, 7680]]]> : tensor<2x1x4xi32>}> : () -> tensor<2x1x4xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x1x4xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]> : tensor<2x3x4xi32>}> : () -> tensor<2x3x4xi32>
+ %0 = tosa.reduce_prod %const {axis = 1 : i32} : (tensor<2x3x4xi32>) -> tensor<2x1x4xi32>
+ return %0 : tensor<2x1x4xi32>
+}
+
+// -----
+
+func.func @reduce_prod_constant() -> tensor<1x3x3xi32> {
+ // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<1x3x3xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[190, 440, 756], [1144, 1610, 2160], [2800, 3536, 4374]]]> : tensor<1x3x3xi32>}> : () -> tensor<1x3x3xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x3x3xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<3x3x3xi32>}> : () -> tensor<3x3x3xi32>
+ %0 = tosa.reduce_prod %const {axis = 0 : i32} : (tensor<3x3x3xi32>) -> tensor<1x3x3xi32>
+ return %0 : tensor<1x3x3xi32>
+}
+
+// -----
+
+func.func @reduce_prod_constant() -> tensor<2x2x2x1xi32> {
+ // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<2x2x2x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}2], [12]], {{\[\[}}30], [56]]], {{\[\[}}[90], [132]], {{\[\[}}182], [240]]]]> : tensor<2x2x2x1xi32>}> : () -> tensor<2x2x2x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x2x2x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]]> : tensor<2x2x2x2xi32>}> : () -> tensor<2x2x2x2xi32>
+ %0 = tosa.reduce_prod %const {axis = 3 : i32} : (tensor<2x2x2x2xi32>) -> tensor<2x2x2x1xi32>
+ return %0 : tensor<2x2x2x1xi32>
+}
+
+// -----
+
+func.func @reduce_prod_constant() -> tensor<1x1x1xi32> {
+ // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<1x1x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x1x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[42]]]> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32>
+ %0 = tosa.reduce_prod %const {axis = 0 : i32} : (tensor<1x1x1xi32>) -> tensor<1x1x1xi32>
+ return %0 : tensor<1x1x1xi32>
+}
+
+// -----
+
+ func.func @reduce_max_constant() -> tensor<1x3xi32> {
+ // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<1x3xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}4, 5, 6]]> : tensor<1x3xi32>}> : () -> tensor<1x3xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x3xi32>
+
+ %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+ %0 = tosa.reduce_max %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+ return %0 : tensor<1x3xi32>
+ }
+
+// -----
+
+ func.func @reduce_max_constant() -> tensor<2x1xi32> {
+ // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<2x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}3], [6]]> : tensor<2x1xi32>}> : () -> tensor<2x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x1xi32>
+ // CHECK: }
+
+ %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+ %0 = tosa.reduce_max %const {axis = 1 : i32} : (tensor<2x3xi32>) -> tensor<2x1xi32>
+ return %0 : tensor<2x1xi32>
+ }
+
+// -----
+
+func.func @reduce_max_constant() -> tensor<3x1xi32> {
+ // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<3x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}3], [6], [9]]> : tensor<3x1xi32>}> : () -> tensor<3x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<3x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[1, 2, 3], [4, 5, 6], [7, 8, 9]]> : tensor<3x3xi32>}> : () -> tensor<3x3xi32>
+ %0 = tosa.reduce_max %const {axis = 1 : i32} : (tensor<3x3xi32>) -> tensor<3x1xi32>
+ return %0 : tensor<3x1xi32>
+}
+
+// -----
+
+func.func @reduce_max_constant() -> tensor<2x1x4xi32> {
+ // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<2x1x4xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[9, 10, 11, 12]], {{\[\[}}21, 22, 23, 24]]]> : tensor<2x1x4xi32>}> : () -> tensor<2x1x4xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x1x4xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]> : tensor<2x3x4xi32>}> : () -> tensor<2x3x4xi32>
+ %0 = tosa.reduce_max %const {axis = 1 : i32} : (tensor<2x3x4xi32>) -> tensor<2x1x4xi32>
+ return %0 : tensor<2x1x4xi32>
+}
+
+// -----
+
+func.func @reduce_max_constant() -> tensor<1x3x3xi32> {
+ // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<1x3x3xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<1x3x3xi32>}> : () -> tensor<1x3x3xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x3x3xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<3x3x3xi32>}> : () -> tensor<3x3x3xi32>
+ %0 = tosa.reduce_max %const {axis = 0 : i32} : (tensor<3x3x3xi32>) -> tensor<1x3x3xi32>
+ return %0 : tensor<1x3x3xi32>
+}
+
+// -----
+
+func.func @reduce_max_constant() -> tensor<2x2x2x1xi32> {
+ // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<2x2x2x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}2], [4]], {{\[\[}}6], [8]]], {{\[\[}}[10], [12]], {{\[\[}}14], [16]]]]> : tensor<2x2x2x1xi32>}> : () -> tensor<2x2x2x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x2x2x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]]> : tensor<2x2x2x2xi32>}> : () -> tensor<2x2x2x2xi32>
+ %0 = tosa.reduce_max %const {axis = 3 : i32} : (tensor<2x2x2x2xi32>) -> tensor<2x2x2x1xi32>
+ return %0 : tensor<2x2x2x1xi32>
+}
+
+// -----
+
+func.func @reduce_max_constant() -> tensor<1x1x1xi32> {
+ // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<1x1x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x1x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[42]]]> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32>
+ %0 = tosa.reduce_max %const {axis = 0 : i32} : (tensor<1x1x1xi32>) -> tensor<1x1x1xi32>
+ return %0 : tensor<1x1x1xi32>
+}
+
+// -----
+
+ func.func @reduce_min_constant() -> tensor<1x3xi32> {
+ // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<1x3xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1, 2, 3]]> : tensor<1x3xi32>}> : () -> tensor<1x3xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x3xi32>
+ %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+ %0 = tosa.reduce_min %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+ return %0 : tensor<1x3xi32>
+ }
+
+
+// -----
+
+ func.func @reduce_min_constant() -> tensor<2x1xi32> {
+ // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<2x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1], [4]]> : tensor<2x1xi32>}> : () -> tensor<2x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x1xi32>
+ // CHECK: }
+
+ %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+ %0 = tosa.reduce_min %const {axis = 1 : i32} : (tensor<2x3xi32>) -> tensor<2x1xi32>
+ return %0 : tensor<2x1xi32>
+ }
+
+// -----
+
+func.func @reduce_min_constant() -> tensor<3x1xi32> {
+ // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<3x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1], [4], [7]]> : tensor<3x1xi32>}> : () -> tensor<3x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<3x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[1, 2, 3], [4, 5, 6], [7, 8, 9]]> : tensor<3x3xi32>}> : () -> tensor<3x3xi32>
+ %0 = tosa.reduce_min %const {axis = 1 : i32} : (tensor<3x3xi32>) -> tensor<3x1xi32>
+ return %0 : tensor<3x1xi32>
+}
+
+// -----
+
+func.func @reduce_min_constant() -> tensor<2x1x4xi32> {
+ // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<2x1x4xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[1, 2, 3, 4]], {{\[\[}}13, 14, 15, 16]]]> : tensor<2x1x4xi32>}> : () -> tensor<2x1x4xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x1x4xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]> : tensor<2x3x4xi32>}> : () -> tensor<2x3x4xi32>
+ %0 = tosa.reduce_min %const {axis = 1 : i32} : (tensor<2x3x4xi32>) -> tensor<2x1x4xi32>
+ return %0 : tensor<2x1x4xi32>
+}
+
+// -----
+
+func.func @reduce_min_constant() -> tensor<1x3x3xi32> {
+ // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<1x3x3xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[1, 2, 3], [4, 5, 6], [7, 8, 9]]]> : tensor<1x3x3xi32>}> : () -> tensor<1x3x3xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x3x3xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<3x3x3xi32>}> : () -> tensor<3x3x3xi32>
+ %0 = tosa.reduce_min %const {axis = 0 : i32} : (tensor<3x3x3xi32>) -> tensor<1x3x3xi32>
+ return %0 : tensor<1x3x3xi32>
+}
+
+// -----
+
+func.func @reduce_min_constant() -> tensor<2x2x2x1xi32> {
+ // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<2x2x2x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}1], [3]], {{\[\[}}5], [7]]], {{\[\[}}[9], [11]], {{\[\[}}13], [15]]]]> : tensor<2x2x2x1xi32>}> : () -> tensor<2x2x2x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<2x2x2x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]]> : tensor<2x2x2x2xi32>}> : () -> tensor<2x2x2x2xi32>
+ %0 = tosa.reduce_min %const {axis = 3 : i32} : (tensor<2x2x2x2xi32>) -> tensor<2x2x2x1xi32>
+ return %0 : tensor<2x2x2x1xi32>
+}
+
+// -----
+
+func.func @reduce_min_constant() -> tensor<1x1x1xi32> {
+ // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<1x1x1xi32> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32>
+ // CHECK: return %[[VAL_0]] : tensor<1x1x1xi32>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[[42]]]> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32>
+ %0 = tosa.reduce_min %const {axis = 0 : i32} : (tensor<1x1x1xi32>) -> tensor<1x1x1xi32>
+ return %0 : tensor<1x1x1xi32>
+}
+
+// -----
+
+func.func @reduce_any_constant() -> tensor<1x3xi1> {
+ // CHECK-LABEL: func.func @reduce_any_constant() -> tensor<1x3xi1> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<true> : tensor<1x3xi1>}> : () -> tensor<1x3xi1>
+ // CHECK: return %[[VAL_0]] : tensor<1x3xi1>
+
+ %const = "tosa.const"() <{value = dense<[[true,true,true], [true,false,true]]> : tensor<2x3xi1>}> : () -> tensor<2x3xi1>
+ %0 = tosa.reduce_any %const {axis = 0 : i32} : (tensor<2x3xi1>) -> tensor<1x3xi1>
+ return %0 : tensor<1x3xi1>
+}
+
+
+// -----
+
+func.func @reduce_any_constant() -> tensor<2x1xi1> {
+// CHECK-LABEL: func.func @reduce_any_constant() -> tensor<2x1xi1> {
+// CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<true> : tensor<2x1xi1>}> : () -> tensor<2x1xi1>
+// CHECK: return %[[VAL_0]] : tensor<2x1xi1>
+// CHECK: }
+
+ %const = "tosa.const"() <{value = dense<[[true,true,true], [true,false,true]]> : tensor<2x3xi1>}> : () -> tensor<2x3xi1>
+ %0 = tosa.reduce_any %const {axis = 1 : i32} : (tensor<2x3xi1>) -> tensor<2x1xi1>
+ return %0 : tensor<2x1xi1>
+}
+
+// -----
+
+func.func @reduce_any_constant() -> tensor<3x1xi1> {
+ // CHECK-LABEL: func.func @reduce_any_constant() -> tensor<3x1xi1> {
+ // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}true], [false], [true]]> : tensor<3x1xi1>}> : () -> tensor<3x1xi1>
+ // CHECK: return %[[VAL_0]] : tensor<3x1xi1>
+ // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[true, false, false], [false, false, false], [false, false, true]]> : tensor<3x3xi1>}> : () -> tensor<3x3xi1>
+ %0 = tosa.reduce_any %const {axis = 1 : i32} : (tensor<3x3xi1>) -> tensor<3x1xi1>
+ return %0 : tensor<3x1xi1>
+}
+
+// -----
+
+// Rank-3 case: reduce_any over the middle axis of a 2x3x4 input. Each output
+// element is the OR of one column of three booleans; the expected 2x1x4
+// constant below is that column-wise OR computed per batch.
+func.func @reduce_any_constant() -> tensor<2x1x4xi1> {
+  // CHECK-LABEL: func.func @reduce_any_constant() -> tensor<2x1x4xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[true, false, true, true]], {{\[\[}}true, false, true, false]]]> : tensor<2x1x4xi1>}> : () -> tensor<2x1x4xi1>
+  // CHECK: return %[[VAL_0]] : tensor<2x1x4xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[[true, false, false, true], [false, false, true, false], [true, false, true, true]], [[false, false, false, false], [false, false, true, false], [true, false, true, false]]]> : tensor<2x3x4xi1>}> : () -> tensor<2x3x4xi1>
+  %0 = tosa.reduce_any %const {axis = 1 : i32} : (tensor<2x3x4xi1>) -> tensor<2x1x4xi1>
+  return %0 : tensor<2x1x4xi1>
+}
+
+// -----
+
+// reduce_all over axis 0 of a constant 2x3 i1 tensor: the middle column
+// contains a `false`, so the column-wise AND folds to [[true, false, true]].
+ func.func @reduce_all_constant() -> tensor<1x3xi1> {
+  // CHECK-LABEL: func.func @reduce_all_constant() -> tensor<1x3xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}true, false, true]]> : tensor<1x3xi1>}> : () -> tensor<1x3xi1>
+  // CHECK: return %[[VAL_0]] : tensor<1x3xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[true,true,true], [true,false,true]]> : tensor<2x3xi1>}> : () -> tensor<2x3xi1>
+  %0 = tosa.reduce_all %const {axis = 0 : i32} : (tensor<2x3xi1>) -> tensor<1x3xi1>
+  return %0 : tensor<1x3xi1>
+ }
+
+// -----
+
+// reduce_all over axis 1 of the same 2x3 input: row 0 is all-true and row 1
+// contains a `false`, so the row-wise AND folds to [[true], [false]].
+ func.func @reduce_all_constant() -> tensor<2x1xi1> {
+  // CHECK-LABEL: func.func @reduce_all_constant() -> tensor<2x1xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}true], [false]]> : tensor<2x1xi1>}> : () -> tensor<2x1xi1>
+  // CHECK: return %[[VAL_0]] : tensor<2x1xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[true,true,true], [true,false,true]]> : tensor<2x3xi1>}> : () -> tensor<2x3xi1>
+  %0 = tosa.reduce_all %const {axis = 1 : i32} : (tensor<2x3xi1>) -> tensor<2x1xi1>
+  return %0 : tensor<2x1xi1>
+ }
+
+// -----
+
+// reduce_all over axis 1 of a 3x3 input where every row contains at least one
+// `false`: the fold collapses to a splat all-false 3x1 constant.
+func.func @reduce_all_constant() -> tensor<3x1xi1> {
+  // CHECK-LABEL: func.func @reduce_all_constant() -> tensor<3x1xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<false> : tensor<3x1xi1>}> : () -> tensor<3x1xi1>
+  // CHECK: return %[[VAL_0]] : tensor<3x1xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[true, false, false], [false, false, false], [false, false, true]]> : tensor<3x3xi1>}> : () -> tensor<3x3xi1>
+  %0 = tosa.reduce_all %const {axis = 1 : i32} : (tensor<3x3xi1>) -> tensor<3x1xi1>
+  return %0 : tensor<3x1xi1>
+}
+
+// -----
+
+// Rank-3 case for reduce_all over the middle axis of a 2x3x4 input: every
+// reduced column of three booleans contains a `false`, so the result folds
+// to a splat all-false 2x1x4 constant.
+func.func @reduce_all_constant() -> tensor<2x1x4xi1> {
+  // CHECK-LABEL: func.func @reduce_all_constant() -> tensor<2x1x4xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<false> : tensor<2x1x4xi1>}> : () -> tensor<2x1x4xi1>
+  // CHECK: return %[[VAL_0]] : tensor<2x1x4xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[[true, false, false, true], [false, false, true, false], [true, false, true, true]], [[false, false, false, false], [false, false, true, false], [true, false, true, false]]]> : tensor<2x3x4xi1>}> : () -> tensor<2x3x4xi1>
+  %0 = tosa.reduce_all %const {axis = 1 : i32} : (tensor<2x3x4xi1>) -> tensor<2x1x4xi1>
+  return %0 : tensor<2x1x4xi1>
+}
+
+// -----
+
+// reduce_sum over axis 0 of a splat constant: summing two 1s per column
+// yields a splat 2, exercising the splat fast path of the fold.
+func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+// CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+// CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<2> : tensor<1x3xi32>}> : () -> tensor<1x3xi32>
+// CHECK: return %[[VAL_0]] : tensor<1x3xi32>
+// CHECK: }
+  %const = "tosa.const"() <{value = dense<1> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  %0 = tosa.reduce_sum %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+  return %0 : tensor<1x3xi32>
+}
+
+// -----
+
+// Negative test: the reduce_sum operand is produced by a tosa.add rather
+// than directly by a tosa.const, so the constant fold must not fire. The
+// CHECK lines verify the add and the reduce_sum both survive unchanged.
+func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+  // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  // CHECK: %[[VAL_1:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1, 2, 3], [4, 5, 7]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  // CHECK: %[[VAL_2:.*]] = tosa.add %[[VAL_0]], %[[VAL_1]] : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32>
+  // CHECK: %[[VAL_3:.*]] = tosa.reduce_sum %[[VAL_2]] {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+  // CHECK: return %[[VAL_3]] : tensor<1x3xi32>
+  %arg0 = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  %arg1 = "tosa.const"() <{value = dense<[[1,2,3], [4,5,7]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  %arg2 = tosa.add %arg0, %arg1 : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32>
+  %0 = tosa.reduce_sum %arg2 {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+  return %0 : tensor<1x3xi32>
+}
More information about the Mlir-commits
mailing list