[llvm] r324903 - [AArch64] Refactor identification of SIMD immediates
Hans Wennborg via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 13 10:17:29 PST 2018
Reverted in r325034 due to http://llvm.org/PR36369
On Mon, Feb 12, 2018 at 5:41 PM, Evandro Menezes via llvm-commits
<llvm-commits at lists.llvm.org> wrote:
> Author: evandro
> Date: Mon Feb 12 08:41:41 2018
> New Revision: 324903
>
> URL: http://llvm.org/viewvc/llvm-project?rev=324903&view=rev
> Log:
> [AArch64] Refactor identification of SIMD immediates
>
> Get rid of icky goto loops and make the code easier to maintain (NFC).
>
> Differential revision: https://reviews.llvm.org/D42723
>
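The refactor replaces the goto-based "try, then retry with the undef-toggled bits" loops with small per-form helpers that return an empty SDValue when the bits do not match a given immediate form; the callers chain those helpers and then rerun them on the undef-derived bits before falling back to the generic path. Below is a minimal standalone sketch of that control-flow pattern, assuming C++17, with hypothetical tryFormA/tryFormB helpers and std::optional standing in for the tryAdvSIMDModImm* functions and SDValue; it illustrates the shape of the change, not the actual LLVM code.

    // Sketch of the control flow only; tryFormA/tryFormB are hypothetical
    // stand-ins for the tryAdvSIMDModImm* helpers, and std::optional<uint64_t>
    // stands in for SDValue (empty means "this form did not match").
    #include <cstdint>
    #include <iostream>
    #include <optional>

    // Each helper recognizes exactly one immediate form and returns a
    // pretend-encoded value, or an empty optional if the bits do not match.
    static std::optional<uint64_t> tryFormA(uint64_t Bits) {
      if ((Bits & 0xFF) == Bits)        // value fits in the low byte
        return Bits | 0x100;            // pretend encoding
      return std::nullopt;
    }

    static std::optional<uint64_t> tryFormB(uint64_t Bits) {
      if ((Bits & 0xFF00) == Bits)      // a byte shifted left by 8
        return (Bits >> 8) | 0x200;     // pretend encoding
      return std::nullopt;
    }

    // The caller chains the helpers instead of using a goto loop: first try
    // the defined bits, then retry with the bits derived from the undef lanes
    // (the second pass the old code reached via goto AttemptModImm), and
    // finally fall through to the generic path (the old FailedModImm: label).
    static uint64_t lower(uint64_t DefBits, uint64_t UndefBits) {
      if (auto V = tryFormA(DefBits))   return *V;
      if (auto V = tryFormB(DefBits))   return *V;
      if (auto V = tryFormA(UndefBits)) return *V;
      if (auto V = tryFormB(UndefBits)) return *V;
      return 0;                         // non-immediate fallback
    }

    int main() {
      std::cout << std::hex
                << lower(0x42, 0) << '\n'             // matches form A -> 142
                << lower(0x4200, 0) << '\n'           // matches form B -> 242
                << lower(0x123456, 0x654321) << '\n'; // no match -> 0
    }

The committed code expresses the same chain as (NewOp = tryAdvSIMDModImm32(...)) || (NewOp = tryAdvSIMDModImm16(...)), relying on SDValue's boolean conversion to stop at the first form that matches.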
> Modified:
> llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
>
> Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=324903&r1=324902&r2=324903&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Mon Feb 12 08:41:41 2018
> @@ -6234,96 +6234,235 @@ static bool resolveBuildVector(BuildVect
> return false;
> }
>
> +// Try 64-bit splatted SIMD immediate.
> +static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
> + const APInt &Bits) {
> + if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
> + uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
> + EVT VT = Op.getValueType();
> + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64;
> +
> + if (AArch64_AM::isAdvSIMDModImmType10(Value)) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType10(Value);
> +
> + SDLoc dl(Op);
> + SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
> + DAG.getConstant(Value, dl, MVT::i32));
> + return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> + }
> + }
> +
> + return SDValue();
> +}
> +
> +// Try 32-bit splatted SIMD immediate.
> +static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
> + const APInt &Bits,
> + const SDValue *LHS = nullptr) {
> + if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
> + uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
> + EVT VT = Op.getValueType();
> + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> + bool isAdvSIMDModImm = false;
> + uint64_t Shift;
> +
> + if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType1(Value);
> + Shift = 0;
> + }
> + else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType2(Value);
> + Shift = 8;
> + }
> + else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType3(Value);
> + Shift = 16;
> + }
> + else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType4(Value);
> + Shift = 24;
> + }
> +
> + if (isAdvSIMDModImm) {
> + SDLoc dl(Op);
> + SDValue Mov;
> +
> + if (LHS)
> + Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
> + DAG.getConstant(Value, dl, MVT::i32),
> + DAG.getConstant(Shift, dl, MVT::i32));
> + else
> + Mov = DAG.getNode(NewOp, dl, MovTy,
> + DAG.getConstant(Value, dl, MVT::i32),
> + DAG.getConstant(Shift, dl, MVT::i32));
> +
> + return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> + }
> + }
> +
> + return SDValue();
> +}
> +
> +// Try 16-bit splatted SIMD immediate.
> +static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
> + const APInt &Bits,
> + const SDValue *LHS = nullptr) {
> + if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
> + uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
> + EVT VT = Op.getValueType();
> + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
> + bool isAdvSIMDModImm = false;
> + uint64_t Shift;
> +
> + if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType5(Value);
> + Shift = 0;
> + }
> + else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType6(Value);
> + Shift = 8;
> + }
> +
> + if (isAdvSIMDModImm) {
> + SDLoc dl(Op);
> + SDValue Mov;
> +
> + if (LHS)
> + Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
> + DAG.getConstant(Value, dl, MVT::i32),
> + DAG.getConstant(Shift, dl, MVT::i32));
> + else
> + Mov = DAG.getNode(NewOp, dl, MovTy,
> + DAG.getConstant(Value, dl, MVT::i32),
> + DAG.getConstant(Shift, dl, MVT::i32));
> +
> + return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> + }
> + }
> +
> + return SDValue();
> +}
> +
> +// Try 32-bit splatted SIMD immediate with shifted ones.
> +static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op,
> + SelectionDAG &DAG, const APInt &Bits) {
> + if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
> + uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
> + EVT VT = Op.getValueType();
> + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> + bool isAdvSIMDModImm = false;
> + uint64_t Shift;
> +
> + if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType7(Value);
> + Shift = 264;
> + }
> + else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType8(Value);
> + Shift = 272;
> + }
> +
> + if (isAdvSIMDModImm) {
> + SDLoc dl(Op);
> + SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
> + DAG.getConstant(Value, dl, MVT::i32),
> + DAG.getConstant(Shift, dl, MVT::i32));
> + return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> + }
> + }
> +
> + return SDValue();
> +}
> +
> +// Try 8-bit splatted SIMD immediate.
> +static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
> + const APInt &Bits) {
> + if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
> + uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
> + EVT VT = Op.getValueType();
> + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;
> +
> + if (AArch64_AM::isAdvSIMDModImmType9(Value)) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType9(Value);
> +
> + SDLoc dl(Op);
> + SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
> + DAG.getConstant(Value, dl, MVT::i32));
> + return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> + }
> + }
> +
> + return SDValue();
> +}
> +
> +// Try FP splatted SIMD immediate.
> +static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
> + const APInt &Bits) {
> + if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
> + uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
> + EVT VT = Op.getValueType();
> + bool isWide = (VT.getSizeInBits() == 128);
> + MVT MovTy;
> + bool isAdvSIMDModImm = false;
> +
> + if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType11(Value);
> + MovTy = isWide ? MVT::v4f32 : MVT::v2f32;
> + }
> + else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) {
> + Value = AArch64_AM::encodeAdvSIMDModImmType12(Value);
> + MovTy = isWide ? MVT::v2f64 : MVT::f64;
> + }
> +
> + if (isAdvSIMDModImm) {
> + SDLoc dl(Op);
> + SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
> + DAG.getConstant(Value, dl, MVT::i32));
> + return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> + }
> + }
> +
> + return SDValue();
> +}
> +
> SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
> SelectionDAG &DAG) const {
> - BuildVectorSDNode *BVN =
> - dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
> SDValue LHS = Op.getOperand(0);
> - SDLoc dl(Op);
> EVT VT = Op.getValueType();
>
> + BuildVectorSDNode *BVN =
> + dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
> + if (!BVN) {
> + // AND commutes, so try swapping the operands.
> + LHS = Op.getOperand(1);
> + BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
> + }
> if (!BVN)
> return Op;
>
> - APInt CnstBits(VT.getSizeInBits(), 0);
> + APInt DefBits(VT.getSizeInBits(), 0);
> APInt UndefBits(VT.getSizeInBits(), 0);
> - if (resolveBuildVector(BVN, CnstBits, UndefBits)) {
> - // We only have BIC vector immediate instruction, which is and-not.
> - CnstBits = ~CnstBits;
> + if (resolveBuildVector(BVN, DefBits, UndefBits)) {
> + SDValue NewOp;
>
> - // We make use of a little bit of goto ickiness in order to avoid having to
> - // duplicate the immediate matching logic for the undef toggled case.
> - bool SecondTry = false;
> - AttemptModImm:
> -
> - if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
> - CnstBits = CnstBits.zextOrTrunc(64);
> - uint64_t CnstVal = CnstBits.getZExtValue();
> -
> - if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(0, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(8, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(16, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(24, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
> - SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(0, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
> - SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(8, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> - }
> -
> - if (SecondTry)
> - goto FailedModImm;
> - SecondTry = true;
> - CnstBits = ~UndefBits;
> - goto AttemptModImm;
> + // We only have BIC vector immediate instruction, which is and-not.
> + DefBits = ~DefBits;
> + if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, Op, DAG,
> + DefBits, &LHS)) ||
> + (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, Op, DAG,
> + DefBits, &LHS)))
> + return NewOp;
> + else {
> + DefBits = ~UndefBits;
> + if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, Op, DAG,
> + DefBits, &LHS)) ||
> + (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, Op, DAG,
> + DefBits, &LHS)))
> + return NewOp;
> + }
> }
>
> -// We can always fall back to a non-immediate AND.
> -FailedModImm:
> + // We can always fall back to a non-immediate AND.
> return Op;
> }
>
> @@ -6434,96 +6573,40 @@ SDValue AArch64TargetLowering::LowerVect
> return Res;
> }
>
> - BuildVectorSDNode *BVN =
> - dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
> - SDValue LHS = Op.getOperand(1);
> - SDLoc dl(Op);
> + SDValue LHS = Op.getOperand(0);
> EVT VT = Op.getValueType();
>
> - // OR commutes, so try swapping the operands.
> + BuildVectorSDNode *BVN =
> + dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
> if (!BVN) {
> - LHS = Op.getOperand(0);
> - BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
> + // OR commutes, so try swapping the operands.
> + LHS = Op.getOperand(1);
> + BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
> }
> if (!BVN)
> return Op;
>
> - APInt CnstBits(VT.getSizeInBits(), 0);
> + APInt DefBits(VT.getSizeInBits(), 0);
> APInt UndefBits(VT.getSizeInBits(), 0);
> - if (resolveBuildVector(BVN, CnstBits, UndefBits)) {
> - // We make use of a little bit of goto ickiness in order to avoid having to
> - // duplicate the immediate matching logic for the undef toggled case.
> - bool SecondTry = false;
> - AttemptModImm:
> -
> - if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
> - CnstBits = CnstBits.zextOrTrunc(64);
> - uint64_t CnstVal = CnstBits.getZExtValue();
> -
> - if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(0, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(8, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(16, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(24, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
> - SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(0, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
> - SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(8, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> - }
> -
> - if (SecondTry)
> - goto FailedModImm;
> - SecondTry = true;
> - CnstBits = UndefBits;
> - goto AttemptModImm;
> + if (resolveBuildVector(BVN, DefBits, UndefBits)) {
> + SDValue NewOp;
> +
> + if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
> + DefBits, &LHS)) ||
> + (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
> + DefBits, &LHS)))
> + return NewOp;
> + else {
> + DefBits = UndefBits;
> + if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
> + DefBits, &LHS)) ||
> + (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
> + DefBits, &LHS)))
> + return NewOp;
> + }
> }
>
> -// We can always fall back to a non-immediate OR.
> -FailedModImm:
> + // We can always fall back to a non-immediate OR.
> return Op;
> }
>
> @@ -6555,222 +6638,51 @@ SDValue AArch64TargetLowering::LowerBUIL
> SelectionDAG &DAG) const {
> SDLoc dl(Op);
> EVT VT = Op.getValueType();
> +
> Op = NormalizeBuildVector(Op, DAG);
> BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
> -
> - APInt CnstBits(VT.getSizeInBits(), 0);
> + APInt DefBits(VT.getSizeInBits(), 0);
> APInt UndefBits(VT.getSizeInBits(), 0);
> - if (resolveBuildVector(BVN, CnstBits, UndefBits)) {
> - // We make use of a little bit of goto ickiness in order to avoid having to
> - // duplicate the immediate matching logic for the undef toggled case.
> - bool SecondTry = false;
> - AttemptModImm:
> -
> - if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
> - CnstBits = CnstBits.zextOrTrunc(64);
> - uint64_t CnstVal = CnstBits.getZExtValue();
> -
> - // Certain magic vector constants (used to express things like NOT
> - // and NEG) are passed through unmodified. This allows codegen patterns
> - // for these operations to match. Special-purpose patterns will lower
> - // these immediates to MOVIs if it proves necessary.
> - if (VT.isInteger() && (CnstVal == 0 || CnstVal == ~0ULL))
> - return Op;
> -
> - // The many faces of MOVI...
> - if (AArch64_AM::isAdvSIMDModImmType10(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType10(CnstVal);
> - if (VT.getSizeInBits() == 128) {
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVIedit, dl, MVT::v2i64,
> - DAG.getConstant(CnstVal, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - // Support the V64 version via subregister insertion.
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVIedit, dl, MVT::f64,
> - DAG.getConstant(CnstVal, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(0, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(8, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(16, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(24, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(0, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(8, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType7(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType7(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVImsl, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(264, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType8(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType8(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVImsl, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(272, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType9(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType9(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;
> - SDValue Mov = DAG.getNode(AArch64ISD::MOVI, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - // The few faces of FMOV...
> - if (AArch64_AM::isAdvSIMDModImmType11(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType11(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4f32 : MVT::v2f32;
> - SDValue Mov = DAG.getNode(AArch64ISD::FMOV, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType12(CnstVal) &&
> - VT.getSizeInBits() == 128) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType12(CnstVal);
> - SDValue Mov = DAG.getNode(AArch64ISD::FMOV, dl, MVT::v2f64,
> - DAG.getConstant(CnstVal, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - // The many faces of MVNI...
> - CnstVal = ~CnstVal;
> - if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(0, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(8, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(16, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(24, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
> - SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(0, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
> - SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(8, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType7(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType7(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MVNImsl, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(264, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> -
> - if (AArch64_AM::isAdvSIMDModImmType8(CnstVal)) {
> - CnstVal = AArch64_AM::encodeAdvSIMDModImmType8(CnstVal);
> - MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
> - SDValue Mov = DAG.getNode(AArch64ISD::MVNImsl, dl, MovTy,
> - DAG.getConstant(CnstVal, dl, MVT::i32),
> - DAG.getConstant(272, dl, MVT::i32));
> - return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
> - }
> - }
> -
> - if (SecondTry)
> - goto FailedModImm;
> - SecondTry = true;
> - CnstBits = UndefBits;
> - goto AttemptModImm;
> + if (resolveBuildVector(BVN, DefBits, UndefBits)) {
> + // Certain magic vector constants (used to express things like NOT
> + // and NEG) are passed through unmodified. This allows codegen patterns
> + // for these operations to match. Special-purpose patterns will lower
> + // these immediates to MOVI if it proves necessary.
> + uint64_t DefVal = DefBits.zextOrTrunc(64).getZExtValue();
> + if (DefBits.getHiBits(64) == DefBits.getLoBits(64) &&
> + VT.isInteger() && (DefVal == 0 || DefVal == UINT64_MAX))
> + return Op;
> +
> + SDValue NewOp;
> + if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
> + return NewOp;
> +
> + DefBits = ~DefBits;
> + if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
> + return NewOp;
> +
> + DefBits = UndefBits;
> + if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
> + return NewOp;
> +
> + DefBits = ~UndefBits;
> + if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
> + (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
> + return NewOp;
> }
> -FailedModImm:
>
> // Scan through the operands to find some interesting properties we can
> // exploit:
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at lists.llvm.org
> http://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits