[llvm] [AArch64] Add @llvm.experimental.vector.match (PR #101974)
Paul Walker via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 30 11:16:57 PDT 2024
================
@@ -6364,6 +6364,91 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
DAG.getNode(AArch64ISD::CTTZ_ELTS, dl, MVT::i64, CttzOp);
return DAG.getZExtOrTrunc(NewCttzElts, dl, Op.getValueType());
}
+ case Intrinsic::experimental_vector_match: {
+ SDValue ID =
+ DAG.getTargetConstant(Intrinsic::aarch64_sve_match, dl, MVT::i64);
+
+ auto Op1 = Op.getOperand(1);
+ auto Op2 = Op.getOperand(2);
+ auto Mask = Op.getOperand(3);
+
+ EVT Op1VT = Op1.getValueType();
+ EVT Op2VT = Op2.getValueType();
+ EVT ResVT = Op.getValueType();
+
+ assert((Op1VT.getVectorElementType() == MVT::i8 ||
+ Op1VT.getVectorElementType() == MVT::i16) &&
+ "Expected 8-bit or 16-bit characters.");
+ assert(Op1VT.getVectorElementType() == Op2VT.getVectorElementType() &&
+ "Operand type mismatch.");
+ assert(!Op2VT.isScalableVector() && "Search vector cannot be scalable.");
+
+ // Note: Currently Op1 needs to be v16i8, v8i16, or the scalable versions.
+ // In the future we could support other types (e.g. v8i8).
+ assert(Op1VT.getSizeInBits().getKnownMinValue() == 128 &&
+ "Unsupported first operand type.");
+
+ // Scalable vector type used to wrap operands.
+ // A single container is enough for both operands because ultimately the
+ // operands will have to be wrapped to the same type (nxv16i8 or nxv8i16).
+ EVT OpContainerVT = Op1VT.isScalableVector()
+ ? Op1VT
+ : getContainerForFixedLengthVector(DAG, Op1VT);
+
+ // Wrap Op2 in a scalable register, and splat it if necessary.
+ if (Op1VT.getVectorMinNumElements() == Op2VT.getVectorNumElements()) {
+ // If Op1 and Op2 have the same number of elements, we can trivially wrap
+ // Op2 in an SVE register.
+ Op2 = convertToScalableVector(DAG, OpContainerVT, Op2);
+ // If the result is scalable, we need to broadcast Op2 to a full SVE
+ // register.
+ if (ResVT.isScalableVector())
+ Op2 = DAG.getNode(AArch64ISD::DUPLANE128, dl, OpContainerVT, Op2,
+ DAG.getTargetConstant(0, dl, MVT::i64));
+ } else {
+ // If Op1 and Op2 have a different number of elements, we need to broadcast
+ // Op2. Ideally we would use an AArch64ISD::DUPLANE* node for this,
+ // similarly to the above, but unfortunately we seem to be missing some
+ // patterns for this. So, as an alternative, we splat Op2 through a splat
+ // of a scalable vector extract. This idiom, though a bit more verbose, is
----------------
paulwalker-arm wrote:
I'd rather get this fixed. Is there something I can help with?
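
For context on the first branch quoted above: the convertToScalableVector helper
boils down to inserting the fixed-length value into the low lanes of an undef
scalable vector of the container type. The sketch below paraphrases that idiom;
it is not the patch's code, and the helper name here is illustrative only.

  #include "llvm/CodeGen/SelectionDAG.h"

  using namespace llvm;

  // Paraphrased sketch: wrap a 128-bit fixed-length vector in its scalable
  // container by inserting it at index 0 of an undef scalable vector. The
  // remaining lanes stay undef until a broadcast (e.g. DUPLANE128) fills them.
  static SDValue wrapInScalableContainer(SelectionDAG &DAG, EVT ContainerVT,
                                         SDValue FixedV) {
    assert(ContainerVT.isScalableVector() && "Expected a scalable container.");
    SDLoc DL(FixedV);
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
                       DAG.getUNDEF(ContainerVT), FixedV,
                       DAG.getVectorIdxConstant(0, DL));
  }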
https://github.com/llvm/llvm-project/pull/101974
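
For readers wondering how the intrinsic reaches this lowering: a front end or
middle-end pass would emit a call along the lines of the sketch below. This is
not code from the PR; in particular the overload types passed to
CreateIntrinsic (assumed here to be the two vector operand types) should be
checked against the intrinsic's definition.

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"

  using namespace llvm;

  // Illustrative only: emit @llvm.experimental.vector.match given a (possibly
  // scalable) haystack, a fixed-length needle vector of the same element
  // type, and a mask matching the haystack's shape.
  static Value *emitVectorMatch(IRBuilder<> &B, Value *Haystack,
                                Value *Needles, Value *Mask) {
    return B.CreateIntrinsic(Intrinsic::experimental_vector_match,
                             {Haystack->getType(), Needles->getType()},
                             {Haystack, Needles, Mask});
  }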