[Mlir-commits] [mlir] [mlir][tosa] Add several level checks (PR #128074)
Luke Hutton
llvmlistbot at llvm.org
Fri Feb 28 04:17:35 PST 2025
================
@@ -111,133 +116,212 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
constCheckers.emplace_back(checkConstantOperandPad);
}
- bool levelCheckKernel(Operation *op, int32_t v,
- const std::string &checkDesc) {
+ bool levelCheckKernel(Operation *op, int32_t v, const StringRef checkDesc) {
if (v > tosaLevel.MAX_KERNEL) {
op->emitOpError() << "failed level check: " << checkDesc;
return false;
}
return true;
}
- bool levelCheckStride(Operation *op, int32_t v,
- const std::string &checkDesc) {
+ bool levelCheckStride(Operation *op, int32_t v, const StringRef checkDesc) {
if (v > tosaLevel.MAX_STRIDE) {
op->emitOpError() << "failed level check: " << checkDesc;
return false;
}
return true;
}
- bool levelCheckScale(Operation *op, int32_t v, const std::string &checkDesc) {
+ bool levelCheckScale(Operation *op, int32_t v, const StringRef checkDesc) {
if (v > tosaLevel.MAX_SCALE) {
op->emitOpError() << "failed level check: " << checkDesc;
return false;
}
return true;
}
- bool levelCheckRank(Operation *op, const Value &v,
- const std::string &checkDesc) {
+ bool levelCheckListSize(Operation *op, int32_t v, const StringRef checkDesc) {
+ if (v > tosaLevel.MAX_TENSOR_LIST_SIZE) {
+ op->emitOpError() << "failed level check for MAX_TENSOR_LIST_SIZE: "
+ << checkDesc;
+ return false;
+ }
+ return true;
+ }
+
+ bool levelCheckRankAndSizes(Operation *op, const Value &v,
+ const StringRef operandOrResult,
+ int32_t highest_rank) {
if (ShapedType type = dyn_cast<ShapedType>(v.getType())) {
if (!type.hasRank()) {
op->emitOpError() << "failed level check: unranked tensor";
return false;
}
- if (type.getRank() > tosaLevel.MAX_RANK) {
- op->emitOpError() << "failed level check: " << checkDesc;
+ if (type.getRank() > highest_rank) {
+ op->emitOpError() << "failed level check: " << operandOrResult
+ << " rank(shape) <= MAX_RANK";
+ return false;
+ }
+
+ auto shape = type.getShape();
+ for (auto dim : shape) {
+ if (mlir::ShapedType::isDynamic(dim)) {
+ op->emitOpError() << "failed level check: " << operandOrResult
+ << " shape dimension cannot be dynamic";
+ return false;
+ }
+ }
+
+ int64_t element_bits = type.getElementTypeBitWidth();
+ int64_t element_bytes = std::max(INT64_C(1), element_bits / 8);
+ int64_t size = element_bytes * type.getNumElements();
+
+ // According to section 1.11 (Tensor Definitions) of the TOSA spec, the
+ // maximum value of tensor_size_t is (1 << MAX_LOG2_SIZE) - 1, where
+ // MAX_LOG2_SIZE is defined in section 1.7 (Levels).
+ // For each tensor, the number of tensor elements multiplied by the
+ // element size in bytes must be representable as a tensor_size_t.
+ const int64_t max_size = (INT64_C(1) << tosaLevel.MAX_LOG2_SIZE) - 1;
+ if (size > max_size) {
+ op->emitOpError()
+ << "failed level check: " << operandOrResult
+ << " tensor size (in bytes) <= (1 << MAX_LOG2_SIZE - 1)";
return false;
}
}
return true;
}
template <typename T>
- bool levelCheckRanksFor(Operation *op) {
- if (dyn_cast<T>(op)) {
- // level check ranks of all operands and results
- for (auto v : op->getOperands()) {
- if (!levelCheckRank(op, v, "operand rank(shape) <= MAX_RANK"))
- return false;
- }
- for (auto v : op->getResults()) {
- if (!levelCheckRank(op, v, "result rank(shape) <= MAX_RANK"))
- return false;
- }
+ bool levelCheckRanksAndSizesFor(T tosaOp) {
+ // level check ranks and sizes of all operands and results
+ auto op = tosaOp.getOperation();
+ for (auto v : op->getOperands()) {
+ if (!levelCheckRankAndSizes(op, v, "operand", tosaLevel.MAX_RANK))
+ return false;
+ }
+
+ for (auto v : op->getResults()) {
+ if (!levelCheckRankAndSizes(op, v, "result", tosaLevel.MAX_RANK))
+ return false;
}
return true;
}
- bool levelCheckRanks(Operation *op) {
-#define CHECK_RANKS_FOR(tosaOp) \
- if (!levelCheckRanksFor<tosaOp##Op>(op)) \
- return false;
+ template <>
+ bool levelCheckRanksAndSizesFor(tosa::ArgMaxOp tosaOp) {
+ auto op = tosaOp.getOperation();
+ if (!levelCheckRankAndSizes(op, tosaOp.getInput(), "operand",
+ tosaLevel.MAX_RANK))
+ return false;
+
+ // rank(output) = rank(input) - 1
+ if (!levelCheckRankAndSizes(op, tosaOp.getOutput(), "result",
+ tosaLevel.MAX_RANK - 1))
+ return false;
+
+ return true;
+ }
+
+ template <>
+ bool levelCheckRanksAndSizesFor(tosa::IfOp tosaOp) {
+ auto op = tosaOp.getOperation();
+
+ // Only the condition input has a rank limitation.
+ if (!levelCheckRankAndSizes(op, tosaOp.getCond(), "operand",
+ tosaLevel.MAX_RANK))
+ return false;
+
+ return true;
+ }
+
+ bool levelCheckRanksAndSizes(Operation *op) {
+#define CHECK_RANKS_AND_SIZES_FOR(tosaOp) \
+ if (isa<tosa::tosaOp##Op>(op)) \
+ if (!levelCheckRanksAndSizesFor(cast<tosa::tosaOp##Op>(op))) \
+ return false;
+
+#define CHECK_RANKS_AND_SIZES_SKIP(tosaOp) \
+ if (isa<tosa::tosaOp##Op>(op)) \
+ return true;
// tensor operators:
- CHECK_RANKS_FOR(ArgMax);
+ CHECK_RANKS_AND_SIZES_FOR(ArgMax);
// all activation functions:
- CHECK_RANKS_FOR(Clamp);
- CHECK_RANKS_FOR(Sigmoid);
- CHECK_RANKS_FOR(Tanh);
+ CHECK_RANKS_AND_SIZES_FOR(Clamp);
+ CHECK_RANKS_AND_SIZES_FOR(Erf);
+ CHECK_RANKS_AND_SIZES_FOR(Sigmoid);
+ CHECK_RANKS_AND_SIZES_FOR(Tanh);
// all elementwise binary operators:
- CHECK_RANKS_FOR(Add);
- CHECK_RANKS_FOR(ArithmeticRightShift);
- CHECK_RANKS_FOR(BitwiseAnd);
- CHECK_RANKS_FOR(BitwiseOr);
- CHECK_RANKS_FOR(BitwiseXor);
- CHECK_RANKS_FOR(IntDiv);
- CHECK_RANKS_FOR(LogicalAnd);
- CHECK_RANKS_FOR(LogicalLeftShift);
- CHECK_RANKS_FOR(LogicalRightShift);
- CHECK_RANKS_FOR(LogicalOr);
- CHECK_RANKS_FOR(LogicalXor);
- CHECK_RANKS_FOR(Maximum);
- CHECK_RANKS_FOR(Minimum);
- CHECK_RANKS_FOR(Mul);
- CHECK_RANKS_FOR(Pow);
- CHECK_RANKS_FOR(Sub);
- CHECK_RANKS_FOR(Table);
+ CHECK_RANKS_AND_SIZES_FOR(Add);
+ CHECK_RANKS_AND_SIZES_FOR(ArithmeticRightShift);
+ CHECK_RANKS_AND_SIZES_FOR(BitwiseAnd);
+ CHECK_RANKS_AND_SIZES_FOR(BitwiseOr);
+ CHECK_RANKS_AND_SIZES_FOR(BitwiseXor);
+ CHECK_RANKS_AND_SIZES_FOR(IntDiv);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalAnd);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalLeftShift);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalRightShift);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalOr);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalXor);
+ CHECK_RANKS_AND_SIZES_FOR(Maximum);
+ CHECK_RANKS_AND_SIZES_FOR(Minimum);
+ CHECK_RANKS_AND_SIZES_FOR(Mul);
+ CHECK_RANKS_AND_SIZES_FOR(Pow);
+ CHECK_RANKS_AND_SIZES_FOR(Sub);
+ CHECK_RANKS_AND_SIZES_FOR(Table);
// all elementwise unary operators:
- CHECK_RANKS_FOR(Abs);
- CHECK_RANKS_FOR(BitwiseNot);
- CHECK_RANKS_FOR(Ceil);
- CHECK_RANKS_FOR(Clz);
- CHECK_RANKS_FOR(Exp);
- CHECK_RANKS_FOR(Floor);
- CHECK_RANKS_FOR(Log);
- CHECK_RANKS_FOR(LogicalNot);
- CHECK_RANKS_FOR(Negate);
- CHECK_RANKS_FOR(Reciprocal);
- CHECK_RANKS_FOR(Rsqrt);
+ CHECK_RANKS_AND_SIZES_FOR(Abs);
+ CHECK_RANKS_AND_SIZES_FOR(BitwiseNot);
+ CHECK_RANKS_AND_SIZES_FOR(Ceil);
+ CHECK_RANKS_AND_SIZES_FOR(Clz);
+ CHECK_RANKS_AND_SIZES_FOR(Cos);
+ CHECK_RANKS_AND_SIZES_FOR(Exp);
+ CHECK_RANKS_AND_SIZES_FOR(Floor);
+ CHECK_RANKS_AND_SIZES_FOR(Log);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalNot);
+ CHECK_RANKS_AND_SIZES_FOR(Negate);
+ CHECK_RANKS_AND_SIZES_FOR(Reciprocal);
+ CHECK_RANKS_AND_SIZES_FOR(Rsqrt);
+ CHECK_RANKS_AND_SIZES_FOR(Sin);
// all elementwise ternary operators:
- CHECK_RANKS_FOR(Select);
+ CHECK_RANKS_AND_SIZES_FOR(Select);
// all comparison operators:
- CHECK_RANKS_FOR(Equal);
- CHECK_RANKS_FOR(Greater);
- CHECK_RANKS_FOR(GreaterEqual);
+ CHECK_RANKS_AND_SIZES_FOR(Equal);
+ CHECK_RANKS_AND_SIZES_FOR(Greater);
+ CHECK_RANKS_AND_SIZES_FOR(GreaterEqual);
// all reduction operators:
- CHECK_RANKS_FOR(ReduceAll);
- CHECK_RANKS_FOR(ReduceAny);
- CHECK_RANKS_FOR(ReduceMax);
- CHECK_RANKS_FOR(ReduceMin);
- CHECK_RANKS_FOR(ReduceProduct);
- CHECK_RANKS_FOR(ReduceSum);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceAll);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceAny);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceMax);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceMin);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceProduct);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceSum);
// all data layout operators:
- CHECK_RANKS_FOR(Concat);
- CHECK_RANKS_FOR(Pad);
- CHECK_RANKS_FOR(Reshape);
- CHECK_RANKS_FOR(Reverse);
- CHECK_RANKS_FOR(Slice);
- CHECK_RANKS_FOR(Tile);
- CHECK_RANKS_FOR(Transpose);
+ CHECK_RANKS_AND_SIZES_FOR(Concat);
+ CHECK_RANKS_AND_SIZES_FOR(Pad);
+ CHECK_RANKS_AND_SIZES_FOR(Reshape);
+ CHECK_RANKS_AND_SIZES_FOR(Reverse);
+ CHECK_RANKS_AND_SIZES_FOR(Slice);
+ CHECK_RANKS_AND_SIZES_FOR(Tile);
+ CHECK_RANKS_AND_SIZES_FOR(Transpose);
// all type conversion operators:
- CHECK_RANKS_FOR(Cast);
- CHECK_RANKS_FOR(Rescale);
+ CHECK_RANKS_AND_SIZES_FOR(Cast);
+ CHECK_RANKS_AND_SIZES_FOR(Rescale);
+ // control flow operators:
+ CHECK_RANKS_AND_SIZES_FOR(If);
// all data nodes operators:
- CHECK_RANKS_FOR(Const);
- CHECK_RANKS_FOR(Identity);
+ CHECK_RANKS_AND_SIZES_FOR(Const);
+ CHECK_RANKS_AND_SIZES_FOR(Identity);
----------------
lhutton1 wrote:
Apologies for missing this previously - I think we're also missing rank checks for the following (rough sketch of what I mean after the list):
- `VARIABLE: initial_value`
- `VARIABLE_WRITE: input1`
- `VARIABLE_READ: output1`
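
Roughly what I'm imagining for the write/read cases, reusing the generic check (an untested sketch, not from the patch):

```cpp
    // variable operators:
    // (untested sketch - the generic levelCheckRanksAndSizesFor already walks
    // all operands and results, which covers VARIABLE_WRITE's input1 and
    // VARIABLE_READ's output1)
    CHECK_RANKS_AND_SIZES_FOR(VariableWrite);
    CHECK_RANKS_AND_SIZES_FOR(VariableRead);
```

`VARIABLE`'s `initial_value` is an attribute rather than an SSA value, so it would presumably need a variant of `levelCheckRankAndSizes` that takes the attribute's type instead of a `Value`.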
https://github.com/llvm/llvm-project/pull/128074
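
As context for the size check in the quoted diff above, the bound amounts to the following standalone computation (a sketch only; the helper name `fitsTensorSizeBound` and its parameters are illustrative and not part of the patch, and the MAX_LOG2_SIZE value comes from the selected TOSA level):

```cpp
#include <algorithm>
#include <cstdint>

// Sketch of the bound enforced by levelCheckRankAndSizes: the total tensor
// size in bytes must be representable as a tensor_size_t, i.e. it must not
// exceed (1 << MAX_LOG2_SIZE) - 1 for the selected level.
bool fitsTensorSizeBound(int64_t numElements, int64_t elementBits,
                         int64_t maxLog2Size) {
  // Sub-byte element types count as one byte each, matching the patch.
  const int64_t elementBytes = std::max(INT64_C(1), elementBits / 8);
  const int64_t sizeInBytes = elementBytes * numElements;
  const int64_t maxSize = (INT64_C(1) << maxLog2Size) - 1;
  return sizeInBytes <= maxSize;
}
```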