[Mlir-commits] [mlir] [mlir][vector] add tensor.concat, bitcast, expand_shape, collapse_shape vectorization support (PR #97297)
Oleksandr Alex Zinenko
llvmlistbot at llvm.org
Tue Jul 9 02:11:44 PDT 2024
================
@@ -1931,6 +2134,108 @@ vectorizePadOpPrecondition(tensor::PadOp padOp,
   return success();
 }
+static LogicalResult
+lowerExpandOpPrecondition(tensor::ExpandShapeOp expandOp,
+                          ArrayRef<int64_t> inputVectorSizes) {
+  auto resultType = expandOp->getResultTypes()[0];
+  auto resultShape = mlir::dyn_cast<ShapedType>(resultType);
+  // Check that all reassociation indices are static.
+  llvm::SmallVector<int64_t> associateIndices;
+  for (auto &attr : expandOp.getReassociation()) {
+    for (auto &indice : mlir::dyn_cast<ArrayAttr>(attr)) {
+      associateIndices.push_back(mlir::dyn_cast<IntegerAttr>(indice).getInt());
+    }
+  }
+
+  if (llvm::any_of(associateIndices,
+                   [](int64_t x) { return x == ShapedType::kDynamic; })) {
+    LDBG("Reassociation must be static: " << expandOp << "\n");
+    return failure();
+  }
+  // Check that the input and output shapes are fully static.
+  if (!resultShape.hasStaticShape() ||
+      !expandOp.getSrcType().hasStaticShape()) {
+    LDBG("Input and output shapes must be static: " << expandOp << "\n");
+    return failure();
+  }
+  if (!inputVectorSizes.empty() &&
+      failed(vector::isValidMaskedInputVector(resultShape.getShape(),
+                                              inputVectorSizes)))
+    return failure();
+
+  return success();
+}
+
+static LogicalResult
+lowerBitcastOpPrecondition(tensor::BitcastOp bitCastOp,
+                           ArrayRef<int64_t> inputVectorSizes) {
+  auto resultType = bitCastOp->getResultTypes()[0];
+  auto resultShapeType = mlir::dyn_cast<ShapedType>(resultType);
+  auto srcType = bitCastOp.getSource().getType();
+  auto srcShapeType = mlir::dyn_cast<ShapedType>(srcType);
+
+  bool isStaticInputOutput =
+      resultShapeType.hasStaticShape() && srcShapeType.hasStaticShape();
+  if (!isStaticInputOutput) {
+    LDBG("Input and output shapes must be static: " << bitCastOp << "\n");
+    return failure();
+  }
+
+  if (!inputVectorSizes.empty() &&
+      failed(vector::isValidMaskedInputVector(resultShapeType.getShape(),
+                                              inputVectorSizes)))
+    return failure();
+  return success();
+}
+
+static LogicalResult
+lowerCollapseShapeOpPrecondition(tensor::CollapseShapeOp collapseOp,
+                                 ArrayRef<int64_t> inputVectorSizes) {
+  auto resultType = collapseOp->getResultTypes()[0];
+  auto resultShapeType = mlir::dyn_cast<ShapedType>(resultType);
+  auto srcShapeType = collapseOp.getSrcType();
+
+  bool isStaticInputOutput =
+      resultShapeType.hasStaticShape() && srcShapeType.hasStaticShape();
+  if (!isStaticInputOutput) {
+    LDBG("Input and output shapes must be static: " << collapseOp << "\n");
+    return failure();
+  }
+
+  if (!inputVectorSizes.empty() &&
+      failed(vector::isValidMaskedInputVector(resultShapeType.getShape(),
+                                              inputVectorSizes)))
+    return failure();
+  return success();
+}
+
+static LogicalResult
+lowerConcatOpPrecondition(tensor::ConcatOp concatOp,
+                          ArrayRef<int64_t> inputVectorSizes) {
+  if (!inputVectorSizes.empty()) {
+    LDBG("Concat operation does not support specifying inputVectorSizes: "
+         << concatOp << "\n");
+  }
+  for (auto x : concatOp->getOperands()) {
+    auto type = mlir::dyn_cast<ShapedType>(x.getType());
+    if (!type) {
+      LDBG("Operand of concat is not a shaped type: " << concatOp << "\n");
+      return failure();
+    }
----------------
ftynse wrote:
Can this ever happen?
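For context on the question: tensor.concat operands are constrained to ranked tensors by the op's definition in the tensor dialect, so the dyn_cast<ShapedType> above presumably can never return null. If that reading is right, an asserting mlir::cast<> would state the invariant directly; a minimal sketch under that assumption (not code from the PR, and the static-shape check is hypothetical):

  for (Value operand : concatOp->getOperands()) {
    // tensor.concat operands are ranked tensors by construction, so an
    // asserting cast documents the invariant instead of re-checking it.
    auto operandType = mlir::cast<ShapedType>(operand.getType());
    // Vectorization here would still require fully static shapes.
    if (!operandType.hasStaticShape())
      return failure();
  }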
https://github.com/llvm/llvm-project/pull/97297