[Mlir-commits] [mlir] [mlir][tosa] Add several level checks (PR #128074)

Luke Hutton llvmlistbot at llvm.org
Tue Feb 25 03:59:26 PST 2025


================
@@ -147,107 +152,149 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
     return true;
   }
 
-  bool levelCheckRank(Operation *op, const Value &v,
-                      const std::string &checkDesc) {
+  bool levelCheckListSize(Operation *op, int32_t v,
+                          const std::string &checkDesc) {
+    if (v > tosaLevel.MAX_TENSOR_LIST_SIZE) {
+      op->emitOpError() << "failed level check for MAX_TENSOR_LIST_SIZE: "
+                        << checkDesc;
+      return false;
+    }
+    return true;
+  }
+
+  bool levelCheckRankAndSizes(Operation *op, const Value &v,
+                              const std::string &operandOrResult) {
     if (ShapedType type = dyn_cast<ShapedType>(v.getType())) {
       if (!type.hasRank()) {
         op->emitOpError() << "failed level check: unranked tensor";
         return false;
       }
       if (type.getRank() > tosaLevel.MAX_RANK) {
-        op->emitOpError() << "failed level check: " << checkDesc;
+        op->emitOpError() << "failed level check: " << operandOrResult
+                          << " rank(shape) <= MAX_RANK";
+        return false;
+      }
+
+      const int64_t max_dim = (INT64_C(1) << tosaLevel.MAX_LOG2_SIZE) - 1;
+      const int64_t max_size =
+          (INT64_C(1) << (tosaLevel.MAX_LOG2_SIZE + 1)) - 1;
+
+      auto shape = type.getShape();
+      for (auto dim : shape) {
+        if (mlir::ShapedType::isDynamic(dim)) {
+          op->emitOpError() << "failed level check: " << operandOrResult
+                            << " shape dimension cannot be dynamic";
+          return false;
+        }
+        if (dim > max_dim) {
+          op->emitOpError() << "failed level check: " << operandOrResult
+                            << " shape dimension <= (1<<MAX_LOG2_SIZE) - 1";
+          return false;
+        }
+      }
+
+      int64_t element_bits = type.getElementTypeBitWidth();
+      int64_t element_bytes = std::max(INT64_C(1), element_bits / 8);
+      int64_t size = element_bytes * type.getNumElements();
+      if (size > max_size) {
+        op->emitOpError()
+            << "failed level check: " << operandOrResult
+            << " tensor size (in bytes) <= (1<<MAX_LOG2_SIZE+1) - 1";
         return false;
       }
     }
     return true;
   }
 
   template <typename T>
-  bool levelCheckRanksFor(Operation *op) {
+  bool levelCheckRanksAndSizesFor(Operation *op) {
     if (dyn_cast<T>(op)) {
      // level check ranks and sizes of all operands and results
       for (auto v : op->getOperands()) {
-        if (!levelCheckRank(op, v, "operand rank(shape) <= MAX_RANK"))
+        if (!levelCheckRankAndSizes(op, v, "operand"))
           return false;
       }
       for (auto v : op->getResults()) {
-        if (!levelCheckRank(op, v, "result rank(shape) <= MAX_RANK"))
+        if (!levelCheckRankAndSizes(op, v, "result"))
           return false;
       }
     }
     return true;
   }
 
-  bool levelCheckRanks(Operation *op) {
-#define CHECK_RANKS_FOR(tosaOp)                                                \
-  if (!levelCheckRanksFor<tosaOp##Op>(op))                                     \
+  bool levelCheckRanksAndSizes(Operation *op) {
+#define CHECK_RANKS_AND_SIZES_FOR(tosaOp)                                      \
+  if (!levelCheckRanksAndSizesFor<tosaOp##Op>(op))                             \
     return false;
 
     // tensor operators:
-    CHECK_RANKS_FOR(ArgMax);
+    CHECK_RANKS_AND_SIZES_FOR(ArgMax);
----------------
lhutton1 wrote:

The expected rank for the ARGMAX output is `0 to MAX_RANK - 1`, which is different from what `levelCheckRankAndSizes` checks.
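
For illustration, a minimal sketch (not the PR's code) of how an ARGMAX-specific result-rank bound could be enforced, following the helper style in the hunk above; the helper name levelCheckArgMaxResultRank is hypothetical, and the tosaLevel member is assumed from the surrounding pass:

  // Hypothetical helper in the style of the pass's other checks. ARGMAX
  // removes the reduced axis, so its output rank is at most MAX_RANK - 1.
  bool levelCheckArgMaxResultRank(Operation *op) {
    if (!isa<tosa::ArgMaxOp>(op))
      return true;
    for (auto v : op->getResults()) {
      if (auto type = dyn_cast<ShapedType>(v.getType());
          type && type.hasRank() && type.getRank() > tosaLevel.MAX_RANK - 1) {
        op->emitOpError()
            << "failed level check: result rank(shape) <= MAX_RANK - 1";
        return false;
      }
    }
    return true;
  }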

https://github.com/llvm/llvm-project/pull/128074
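
As a standalone sanity check of the max_dim/max_size bounds computed in the hunk above, here is a compilable C++ snippet; the MAX_LOG2_SIZE value of 31 is an assumption (the TOSA 8K level value) and is not stated in this mail:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int64_t MAX_LOG2_SIZE = 31; // assumed 8K-level value
    const int64_t max_dim = (INT64_C(1) << MAX_LOG2_SIZE) - 1;        // 2147483647
    const int64_t max_size = (INT64_C(1) << (MAX_LOG2_SIZE + 1)) - 1; // 4294967295
    // tensor<65536x65536xi8>: each dimension passes the per-dim check,
    // but the total byte size exceeds max_size by exactly one byte.
    const int64_t size = INT64_C(65536) * 65536;
    printf("max_dim=%lld max_size=%lld size=%lld over=%d\n",
           (long long)max_dim, (long long)max_size, (long long)size,
           (int)(size > max_size));
  }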

