[llvm] dfc8ab2 - Revert "Revert "[AArch64] Select SMULL for zero extended vectors when top bit is zero""

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 12 06:56:32 PST 2022


Reminder: please always mention the reason for the revert in the
commit message.

On Mon, Dec 12, 2022 at 5:46 PM Zain Jaffal via llvm-commits
<llvm-commits at lists.llvm.org> wrote:
>
>
> Author: Zain Jaffal
> Date: 2022-12-12T14:45:27Z
> New Revision: dfc8ab2e25dac2baddb36023991e6f88e12eb4bb
>
> URL: https://github.com/llvm/llvm-project/commit/dfc8ab2e25dac2baddb36023991e6f88e12eb4bb
> DIFF: https://github.com/llvm/llvm-project/commit/dfc8ab2e25dac2baddb36023991e6f88e12eb4bb.diff
>
> LOG: Revert "Revert "[AArch64] Select SMULL for zero extended vectors when top bit is zero""
>
> This reverts commit c07a01c2bb0d1f95689f809fd5be23829e364393.
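>
> The pattern this enables, sketched from the smull_zext_v8i8_v8i32 test
> updated below (the full test body is truncated in the hunks), is a
> multiply where only one operand is sign extended:
>
>   %zext.A = zext <8 x i8> %load.A to <8 x i32>
>   %sext.B = sext <8 x i16> %load.B to <8 x i32>
>   %res = mul <8 x i32> %zext.A, %sext.B
>
> Because %load.A is only 8 bits wide, the top bit of its 16-bit half is
> known zero, so the zero extension can be treated as a sign extension and
> the multiply lowers to smull/smull2 instead of shifts and muls.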
>
> Added:
>
>
> Modified:
>     llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
>     llvm/test/CodeGen/AArch64/aarch64-smull.ll
>
> Removed:
>
>
>
> ################################################################################
> diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
> index 6ed58ab5662d..ac55c284b919 100644
> --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
> +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
> @@ -4526,7 +4526,7 @@ SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
>  }
>
>  static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
> -                                 bool &IsMLA) {
> +                                 SDLoc DL, bool &IsMLA) {
>    bool IsN0SExt = isSignExtended(N0, DAG);
>    bool IsN1SExt = isSignExtended(N1, DAG);
>    if (IsN0SExt && IsN1SExt)
> @@ -4538,6 +4538,23 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
>    if (IsN0ZExt && IsN1ZExt)
>      return AArch64ISD::UMULL;
>
> +  // Select SMULL if we can replace zext with sext.
> +  if ((IsN0SExt && IsN1ZExt) || (IsN0ZExt && IsN1SExt)) {
> +    SDValue ZextOperand;
> +    if (IsN0ZExt)
> +      ZextOperand = N0->getOperand(0);
> +    else
> +      ZextOperand = N1->getOperand(0);
> +    if (DAG.SignBitIsZero(ZextOperand)) {
> +      SDNode *NewSext =
> +          DAG.getSExtOrTrunc(ZextOperand, DL, N0->getValueType(0)).getNode();
> +      if (IsN0ZExt)
> +        N0 = NewSext;
> +      else
> +        N1 = NewSext;
> +      return AArch64ISD::SMULL;
> +    }
> +  }
>    if (!IsN1SExt && !IsN1ZExt)
>      return 0;
>    // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
> @@ -4575,7 +4592,8 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
>    SDNode *N0 = Op.getOperand(0).getNode();
>    SDNode *N1 = Op.getOperand(1).getNode();
>    bool isMLA = false;
> -  unsigned NewOpc = selectUmullSmull(N0, N1, DAG, isMLA);
> +  SDLoc DL(Op);
> +  unsigned NewOpc = selectUmullSmull(N0, N1, DAG, DL, isMLA);
>
>    if (!NewOpc) {
>      if (VT == MVT::v2i64)
> @@ -4587,7 +4605,6 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
>    }
>
>    // Legalize to a S/UMULL instruction
> -  SDLoc DL(Op);
>    SDValue Op0;
>    SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
>    if (!isMLA) {
>
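> The block added above keys on DAG.SignBitIsZero: if the sign bit of the
> operand feeding the zext is known to be clear, then zext and sext
> produce the same value, so the zext can be replaced with a sext and the
> multiply matched as AArch64ISD::SMULL. A minimal IR-level sketch of the
> equivalence being exploited:
>
>   ; When the sign bit of %x is known zero, these define the same value:
>   %z = zext <4 x i16> %x to <4 x i32>
>   %s = sext <4 x i16> %x to <4 x i32>
>   ; so mul (zext %x), (sext %y) == mul (sext %x), (sext %y),
>   ; and the latter is exactly the SMULL pattern.
>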
> diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
> index bdc156f8b05d..59d39458d4c5 100644
> --- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
> +++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
> @@ -50,14 +50,10 @@ define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind {
>  ; CHECK-LABEL: smull_zext_v8i8_v8i32:
>  ; CHECK:       // %bb.0:
>  ; CHECK-NEXT:    ldr d0, [x0]
> -; CHECK-NEXT:    ldr q1, [x1]
> +; CHECK-NEXT:    ldr q2, [x1]
>  ; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
> -; CHECK-NEXT:    sshll v2.4s, v1.4h, #0
> -; CHECK-NEXT:    sshll2 v1.4s, v1.8h, #0
> -; CHECK-NEXT:    ushll2 v3.4s, v0.8h, #0
> -; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
> -; CHECK-NEXT:    mul v1.4s, v3.4s, v1.4s
> -; CHECK-NEXT:    mul v0.4s, v0.4s, v2.4s
> +; CHECK-NEXT:    smull2 v1.4s, v0.8h, v2.8h
> +; CHECK-NEXT:    smull v0.4s, v0.4h, v2.4h
>  ; CHECK-NEXT:    ret
>    %load.A = load <8 x i8>, <8 x i8>* %A
>    %load.B = load <8 x i16>, <8 x i16>* %B
> @@ -70,15 +66,11 @@ define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind {
>  define <8 x i32> @smull_zext_v8i8_v8i32_sext_first_operand(<8 x i16>* %A, <8 x i8>* %B) nounwind {
>  ; CHECK-LABEL: smull_zext_v8i8_v8i32_sext_first_operand:
>  ; CHECK:       // %bb.0:
> -; CHECK-NEXT:    ldr d1, [x1]
> -; CHECK-NEXT:    ldr q0, [x0]
> -; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
> -; CHECK-NEXT:    sshll v2.4s, v0.4h, #0
> -; CHECK-NEXT:    sshll2 v0.4s, v0.8h, #0
> -; CHECK-NEXT:    ushll2 v3.4s, v1.8h, #0
> -; CHECK-NEXT:    ushll v4.4s, v1.4h, #0
> -; CHECK-NEXT:    mul v1.4s, v0.4s, v3.4s
> -; CHECK-NEXT:    mul v0.4s, v2.4s, v4.4s
> +; CHECK-NEXT:    ldr d0, [x1]
> +; CHECK-NEXT:    ldr q2, [x0]
> +; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
> +; CHECK-NEXT:    smull2 v1.4s, v2.8h, v0.8h
> +; CHECK-NEXT:    smull v0.4s, v2.4h, v0.4h
>  ; CHECK-NEXT:    ret
>    %load.A = load <8 x i16>, <8 x i16>* %A
>    %load.B = load <8 x i8>, <8 x i8>* %B
> @@ -116,9 +108,7 @@ define <4 x i32> @smull_zext_v4i16_v4i32(<4 x i8>* %A, <4 x i16>* %B) nounwind {
>  ; CHECK-NEXT:    ldr s0, [x0]
>  ; CHECK-NEXT:    ldr d1, [x1]
>  ; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
> -; CHECK-NEXT:    sshll v1.4s, v1.4h, #0
> -; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
> -; CHECK-NEXT:    mul v0.4s, v0.4s, v1.4s
> +; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
>  ; CHECK-NEXT:    ret
>    %load.A = load <4 x i8>, <4 x i8>* %A
>    %load.B = load <4 x i16>, <4 x i16>* %B
> @@ -156,16 +146,7 @@ define <2 x i64> @smull_zext_and_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounw
>  ; CHECK-NEXT:    ldr d0, [x0]
>  ; CHECK-NEXT:    ldr d1, [x1]
>  ; CHECK-NEXT:    bic v0.2s, #128, lsl #24
> -; CHECK-NEXT:    sshll v1.2d, v1.2s, #0
> -; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
> -; CHECK-NEXT:    fmov x9, d1
> -; CHECK-NEXT:    fmov x10, d0
> -; CHECK-NEXT:    mov x8, v1.d[1]
> -; CHECK-NEXT:    mov x11, v0.d[1]
> -; CHECK-NEXT:    smull x9, w10, w9
> -; CHECK-NEXT:    smull x8, w11, w8
> -; CHECK-NEXT:    fmov d0, x9
> -; CHECK-NEXT:    mov v0.d[1], x8
> +; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
>  ; CHECK-NEXT:    ret
>    %load.A = load <2 x i32>, <2 x i32>* %A
>    %and.A = and <2 x i32> %load.A, <i32 u0x7FFFFFFF, i32 u0x7FFFFFFF>
>
>
>
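> The last hunk reaches DAG.SignBitIsZero through a mask rather than a
> narrow load: and-ing with u0x7FFFFFFF clears the sign bit. A sketch of
> how the rest of that test likely reads (its tail is truncated above):
>
>   %load.A = load <2 x i32>, <2 x i32>* %A
>   %load.B = load <2 x i32>, <2 x i32>* %B
>   %and.A = and <2 x i32> %load.A, <i32 u0x7FFFFFFF, i32 u0x7FFFFFFF>
>   %zext.A = zext <2 x i32> %and.A to <2 x i64>  ; sign bit cleared by the mask
>   %sext.B = sext <2 x i32> %load.B to <2 x i64>
>   %res = mul <2 x i64> %zext.A, %sext.B         ; now a single smull v0.2d
>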
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at lists.llvm.org
> https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits