[llvm] 074e4ae - [DAG] foldABSToABD - support abs(*ext(x) - *ext(y)) -> zext(abd*(x, y)) from different extension source types (#71670)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 14 04:56:47 PST 2023


Author: Simon Pilgrim
Date: 2023-11-14T12:56:42Z
New Revision: 074e4ae0e769e831ca06beb7b37814d67b3d56b6

URL: https://github.com/llvm/llvm-project/commit/074e4ae0e769e831ca06beb7b37814d67b3d56b6
DIFF: https://github.com/llvm/llvm-project/commit/074e4ae0e769e831ca06beb7b37814d67b3d56b6.diff

LOG: [DAG] foldABSToABD - support abs(*ext(x) - *ext(y)) -> zext(abd*(x, y)) from different extension source types (#71670)

We currently limit the fold to cases where both operands are extended from the same source type, but we can safely perform it using the wider of the two mismatching source types (we're really only interested in having extension bits on both sources), while ensuring we don't create additional extensions/truncations.
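
As a standalone illustration (not part of the patch), the scalar equivalence
the fold relies on can be sketched in C++ for the signed i8/i16 case covered
by abd_ext_i8_i16 in abds.ll below: abs(sext(a) - sext(b)) computed at i64
equals the signed absolute difference computed at the wider source type
(i16), zero-extended back out. The helper names are made up for the sketch.

    #include <cassert>
    #include <cstdint>

    // abs(sext i8 a - sext i16 b) computed at the wide (i64) type.
    static uint64_t abd_wide(int8_t A, int16_t B) {
      int64_t D = (int64_t)A - (int64_t)B;
      return (uint64_t)(D < 0 ? -D : D);
    }

    // The folded form: sign-extend the narrower source to MaxVT (i16),
    // take the absolute difference there (the abds result always fits in
    // the unsigned i16 range), then zero-extend.
    static uint64_t abd_folded(int8_t A, int16_t B) {
      int16_t AW = (int16_t)A;                    // sext i8 -> i16
      int32_t D = (int32_t)AW - (int32_t)B;       // exact difference
      uint16_t ABD = (uint16_t)(D < 0 ? -D : D);  // abds i16 semantics
      return (uint64_t)ABD;                       // zext i16 -> i64
    }

    int main() {
      // Exhaustively check every i8/i16 pair.
      for (int A = -128; A <= 127; ++A)
        for (int B = -32768; B <= 32767; ++B)
          assert(abd_wide((int8_t)A, (int16_t)B) ==
                 abd_folded((int8_t)A, (int16_t)B));
      return 0;
    }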

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AArch64/sve-abd.ll
    llvm/test/CodeGen/X86/abds.ll
    llvm/test/CodeGen/X86/abdu.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index a867d88f76c0cf6..f33110c92355a23 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10923,11 +10923,12 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N) {
 
   // fold abs(sext(x) - sext(y)) -> zext(abds(x, y))
   // fold abs(zext(x) - zext(y)) -> zext(abdu(x, y))
-  // NOTE: Extensions must be equivalent.
-  if (VT1 == VT2 && hasOperation(ABDOpcode, VT1)) {
-    Op0 = Op0.getOperand(0);
-    Op1 = Op1.getOperand(0);
-    SDValue ABD = DAG.getNode(ABDOpcode, DL, VT1, Op0, Op1);
+  EVT MaxVT = VT1.bitsGT(VT2) ? VT1 : VT2;
+  if ((VT1 == MaxVT || Op0->hasOneUse()) &&
+      (VT2 == MaxVT || Op1->hasOneUse()) && hasOperation(ABDOpcode, MaxVT)) {
+    SDValue ABD = DAG.getNode(ABDOpcode, DL, MaxVT,
+                              DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op0),
+                              DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op1));
     ABD = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, ABD);
     return DAG.getZExtOrTrunc(ABD, DL, SrcVT);
   }
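
Restating the new guard standalone (a simplified, hypothetical sketch in
plain C++, not the actual SelectionDAG code): the ABD node is built at
MaxVT, the wider of the two source types, and an operand whose source is
narrower than MaxVT only participates if its extension node has a single
use, so the fold does not leave the original wide extension live and add
extensions/truncations on top of it.

    #include <algorithm>
    #include <cstdio>

    // Widths stand in for the EVTs; HasOneUse mirrors the extension node's
    // hasOneUse() check in the code above.
    struct ExtOperand {
      unsigned SrcBits;  // width of the pre-extension source value
      bool HasOneUse;    // whether the extension node has a single user
    };

    static bool canFoldToABD(const ExtOperand &Op0, const ExtOperand &Op1,
                             bool (*HasABD)(unsigned Bits)) {
      unsigned MaxBits = std::max(Op0.SrcBits, Op1.SrcBits);
      // An operand already at MaxBits is reused as-is; a narrower one only
      // qualifies if its wider extension becomes dead after the fold.
      return (Op0.SrcBits == MaxBits || Op0.HasOneUse) &&
             (Op1.SrcBits == MaxBits || Op1.HasOneUse) && HasABD(MaxBits);
    }

    int main() {
      ExtOperand A{8, true}, B{16, false};  // e.g. ext from i8 and from i16
      std::printf("fold legal at i16: %d\n",
                  canFoldToABD(A, B, [](unsigned Bits) { return Bits == 16; }));
      return 0;
    }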

diff --git a/llvm/test/CodeGen/AArch64/sve-abd.ll b/llvm/test/CodeGen/AArch64/sve-abd.ll
index 31a226ab74c9475..95aec0a49261997 100644
--- a/llvm/test/CodeGen/AArch64/sve-abd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-abd.ll
@@ -248,17 +248,9 @@ define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
 define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <vscale x 4 x i8> %b) #0 {
 ; CHECK-LABEL: uabd_non_matching_extension:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    and z1.s, z1.s, #0xff
-; CHECK-NEXT:    uunpkhi z2.d, z0.s
-; CHECK-NEXT:    uunpklo z0.d, z0.s
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    uunpkhi z3.d, z1.s
-; CHECK-NEXT:    uunpklo z1.d, z1.s
-; CHECK-NEXT:    sub z0.d, z0.d, z1.d
-; CHECK-NEXT:    sub z1.d, z2.d, z3.d
-; CHECK-NEXT:    abs z1.d, p0/m, z1.d
-; CHECK-NEXT:    abs z0.d, p0/m, z0.d
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    uabd z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64>

diff --git a/llvm/test/CodeGen/X86/abds.ll b/llvm/test/CodeGen/X86/abds.ll
index c5be2474c84d405..39ac47e99e6e989 100644
--- a/llvm/test/CodeGen/X86/abds.ll
+++ b/llvm/test/CodeGen/X86/abds.ll
@@ -50,16 +50,13 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
 ;
 ; X64-LABEL: abd_ext_i8_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    # kill: def $esi killed $esi def $rsi
-; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    movsbq %dil, %rax
-; X64-NEXT:    movswq %si, %rcx
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    subq %rcx, %rdx
-; X64-NEXT:    negq %rdx
-; X64-NEXT:    subq %rcx, %rax
-; X64-NEXT:    cmovleq %rdx, %rax
-; X64-NEXT:    # kill: def $al killed $al killed $rax
+; X64-NEXT:    movswl %si, %eax
+; X64-NEXT:    movsbl %dil, %ecx
+; X64-NEXT:    subl %eax, %ecx
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovsl %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
   %aext = sext i8 %a to i64
   %bext = sext i16 %b to i64
@@ -132,32 +129,25 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
 define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
 ; X86-LABEL: abd_ext_i16_i32:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %edx
-; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sarl $31, %esi
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    subl %ecx, %edx
+; X86-NEXT:    negl %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %edx, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    xorl %esi, %eax
-; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    cmovlel %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
-; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i16_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    movswq %di, %rax
-; X64-NEXT:    movslq %esi, %rcx
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    subq %rcx, %rdx
-; X64-NEXT:    negq %rdx
-; X64-NEXT:    subq %rcx, %rax
-; X64-NEXT:    cmovleq %rdx, %rax
+; X64-NEXT:    movslq %esi, %rax
+; X64-NEXT:    movswl %di, %ecx
+; X64-NEXT:    movslq %ecx, %rcx
+; X64-NEXT:    subq %rax, %rcx
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovsq %rcx, %rax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $rax
 ; X64-NEXT:    retq
   %aext = sext i16 %a to i64
@@ -231,31 +221,24 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
 define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
 ; X86-LABEL: abd_ext_i32_i16:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %edx
-; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sarl $31, %esi
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    subl %ecx, %edx
+; X86-NEXT:    negl %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %edx, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    xorl %esi, %eax
-; X86-NEXT:    subl %esi, %eax
-; X86-NEXT:    popl %esi
+; X86-NEXT:    cmovlel %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i32_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    # kill: def $esi killed $esi def $rsi
-; X64-NEXT:    movslq %edi, %rax
-; X64-NEXT:    movswq %si, %rcx
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    subq %rcx, %rdx
-; X64-NEXT:    negq %rdx
-; X64-NEXT:    subq %rcx, %rax
-; X64-NEXT:    cmovleq %rdx, %rax
+; X64-NEXT:    movslq %edi, %rcx
+; X64-NEXT:    movswl %si, %eax
+; X64-NEXT:    cltq
+; X64-NEXT:    subq %rax, %rcx
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovsq %rcx, %rax
 ; X64-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-NEXT:    retq
   %aext = sext i32 %a to i64

diff --git a/llvm/test/CodeGen/X86/abdu.ll b/llvm/test/CodeGen/X86/abdu.ll
index fe805528c435a52..11719be4ab5cd0c 100644
--- a/llvm/test/CodeGen/X86/abdu.ll
+++ b/llvm/test/CodeGen/X86/abdu.ll
@@ -50,14 +50,13 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
 ;
 ; X64-LABEL: abd_ext_i8_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    movzwl %si, %ecx
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    subq %rcx, %rdx
-; X64-NEXT:    negq %rdx
-; X64-NEXT:    subq %rcx, %rax
-; X64-NEXT:    cmovbeq %rdx, %rax
-; X64-NEXT:    # kill: def $al killed $al killed $rax
+; X64-NEXT:    movzwl %si, %eax
+; X64-NEXT:    movzbl %dil, %ecx
+; X64-NEXT:    subl %eax, %ecx
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovsl %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
   %aext = zext i8 %a to i64
   %bext = zext i16 %b to i64
@@ -130,25 +129,24 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
 define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
 ; X86-LABEL: abd_ext_i16_i32:
 ; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    sbbl %ecx, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    subl %ecx, %edx
+; X86-NEXT:    negl %edx
 ; X86-NEXT:    subl %ecx, %eax
+; X86-NEXT:    cmovbel %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i16_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    movzwl %di, %eax
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    subq %rcx, %rdx
-; X64-NEXT:    negq %rdx
-; X64-NEXT:    subq %rcx, %rax
-; X64-NEXT:    cmovbeq %rdx, %rax
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    movzwl %di, %ecx
+; X64-NEXT:    subq %rax, %rcx
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovsq %rcx, %rax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $rax
 ; X64-NEXT:    retq
   %aext = zext i16 %a to i64
@@ -224,23 +222,21 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    subl %ecx, %edx
+; X86-NEXT:    negl %edx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %edx, %edx
-; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %eax
-; X86-NEXT:    subl %edx, %eax
+; X86-NEXT:    cmovbel %edx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i32_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    movzwl %si, %ecx
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    subq %rcx, %rdx
-; X64-NEXT:    negq %rdx
-; X64-NEXT:    subq %rcx, %rax
-; X64-NEXT:    cmovbeq %rdx, %rax
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    movzwl %si, %eax
+; X64-NEXT:    subq %rax, %rcx
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovsq %rcx, %rax
 ; X64-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-NEXT:    retq
   %aext = zext i32 %a to i64

