[llvm] 3369e47 - [DAG] Allow XOR(X, MIN_SIGNED_VALUE) to perform AddLike folds

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 6 02:37:18 PDT 2022


Author: Simon Pilgrim
Date: 2022-04-06T10:37:11+01:00
New Revision: 3369e474bbd1172175945437783b6841979e654a

URL: https://github.com/llvm/llvm-project/commit/3369e474bbd1172175945437783b6841979e654a
DIFF: https://github.com/llvm/llvm-project/commit/3369e474bbd1172175945437783b6841979e654a.diff

LOG: [DAG] Allow XOR(X,MIN_SIGNED_VALUE) to perform AddLike folds

As raised on PR52267, XOR(X,MIN_SIGNED_VALUE) can be treated as ADD(X,MIN_SIGNED_VALUE), so let these cases use the 'AddLike' folds, similar to how we perform no-common-bits OR(X,Y) cases.

define i8 @src(i8 %x) {
  %r = xor i8 %x, 128
  ret i8 %r
}
=>
define i8 @tgt(i8 %x) {
  %r = add i8 %x, 128
  ret i8 %r
}
Transformation seems to be correct!

https://alive2.llvm.org/ce/z/qV46E2

Differential Revision: https://reviews.llvm.org/D122754

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/test/CodeGen/ARM/dsp-mlal.ll
    llvm/test/CodeGen/X86/setcc-combine.ll
    llvm/test/CodeGen/X86/xor-lea.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 05719a92f8370..bd38e1669144a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2348,6 +2348,15 @@ static SDValue foldAddSubOfSignBit(SDNode *N, SelectionDAG &DAG) {
   return SDValue();
 }
 
+static bool isADDLike(SDValue V, const SelectionDAG &DAG) {
+  unsigned Opcode = V.getOpcode();
+  if (Opcode == ISD::OR)
+    return DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1));
+  if (Opcode == ISD::XOR)
+    return isMinSignedConstant(V.getOperand(1));
+  return false;
+}
+
 /// Try to fold a node that behaves like an ADD (note that N isn't necessarily
 /// an ISD::ADD here, it could for example be an ISD::OR if we know that there
 /// are no common bits set in the operands).
@@ -2423,9 +2432,10 @@ SDValue DAGCombiner::visitADDLike(SDNode *N) {
 
     // Fold (add (or x, c0), c1) -> (add x, (c0 + c1)) if (or x, c0) is
     // equivalent to (add x, c0).
-    if (N0.getOpcode() == ISD::OR &&
-        isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true) &&
-        DAG.haveNoCommonBitsSet(N0.getOperand(0), N0.getOperand(1))) {
+    // Fold (add (xor x, c0), c1) -> (add x, (c0 + c1)) if (xor x, c0) is
+    // equivalent to (add x, c0).
+    if (isADDLike(N0, DAG) && 
+        isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true)) {
       if (SDValue Add0 = DAG.FoldConstantArithmetic(ISD::ADD, DL, VT,
                                                     {N1, N0.getOperand(1)}))
         return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Add0);
@@ -2442,10 +2452,11 @@ SDValue DAGCombiner::visitADDLike(SDNode *N) {
 
     // Reassociate (add (or x, c), y) -> (add add(x, y), c)) if (or x, c) is
     // equivalent to (add x, c).
+    // Reassociate (add (xor x, c), y) -> (add add(x, y), c)) if (xor x, c) is
+    // equivalent to (add x, c).
     auto ReassociateAddOr = [&](SDValue N0, SDValue N1) {
-      if (N0.getOpcode() == ISD::OR && N0.hasOneUse() &&
-          isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true) &&
-          DAG.haveNoCommonBitsSet(N0.getOperand(0), N0.getOperand(1))) {
+      if (isADDLike(N0, DAG) && N0.hasOneUse() &&
+          isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true)) {
         return DAG.getNode(ISD::ADD, DL, VT,
                            DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(0)),
                            N0.getOperand(1));
@@ -8297,6 +8308,13 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
   if (SDValue RXOR = reassociateOps(ISD::XOR, DL, N0, N1, N->getFlags()))
     return RXOR;
 
+  // look for 'add-like' folds:
+  // XOR(N0,MIN_SIGNED_VALUE) == ADD(N0,MIN_SIGNED_VALUE)
+  if ((!LegalOperations || TLI.isOperationLegal(ISD::ADD, VT)) &&
+      isMinSignedConstant(N1))
+    if (SDValue Combined = visitADDLike(N))
+      return Combined;
+
   // fold !(x cc y) -> (x !cc y)
   unsigned N0Opcode = N0.getOpcode();
   SDValue LHS, RHS, CC;

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 8f3d5fcd5b044..c1eedd35d7743 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5058,6 +5058,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     break;
   case ISD::FREEZE:
     assert(VT == Operand.getValueType() && "Unexpected VT!");
+    if (isGuaranteedNotToBeUndefOrPoison(Operand))
+      return Operand;
     break;
   case ISD::TokenFactor:
   case ISD::MERGE_VALUES:

diff --git a/llvm/test/CodeGen/ARM/dsp-mlal.ll b/llvm/test/CodeGen/ARM/dsp-mlal.ll
index 53593034b3a6f..5a4947b77d6e3 100644
--- a/llvm/test/CodeGen/ARM/dsp-mlal.ll
+++ b/llvm/test/CodeGen/ARM/dsp-mlal.ll
@@ -272,23 +272,21 @@ entry:
 define hidden i32 @NOT_SMMLA(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
 ; DSP-LABEL: NOT_SMMLA:
 ; DSP:       @ %bb.0: @ %entry
-; DSP-NEXT:    smmul r1, r2, r1
-; DSP-NEXT:    eor r1, r1, #-2147483648
-; DSP-NEXT:    add r0, r1
+; DSP-NEXT:    smmla r0, r1, r2, r0
+; DSP-NEXT:    add.w r0, r0, #-2147483648
 ; DSP-NEXT:    bx lr
 ;
 ; ARM7-LABEL: NOT_SMMLA:
 ; ARM7:       @ %bb.0: @ %entry
-; ARM7-NEXT:    smmul r1, r2, r1
-; ARM7-NEXT:    eor r1, r1, #-2147483648
-; ARM7-NEXT:    add r0, r1, r0
+; ARM7-NEXT:    smmla r0, r2, r1, r0
+; ARM7-NEXT:    add r0, r0, #-2147483648
 ; ARM7-NEXT:    bx lr
 ;
 ; NODSP-LABEL: NOT_SMMLA:
 ; NODSP:       @ %bb.0: @ %entry
 ; NODSP-NEXT:    smull r1, r2, r2, r1
-; NODSP-NEXT:    eor r1, r2, #-2147483648
-; NODSP-NEXT:    add r0, r1
+; NODSP-NEXT:    add r0, r2
+; NODSP-NEXT:    add.w r0, r0, #-2147483648
 ; NODSP-NEXT:    bx lr
 entry:
   %conv = sext i32 %b to i64

diff --git a/llvm/test/CodeGen/X86/setcc-combine.ll b/llvm/test/CodeGen/X86/setcc-combine.ll
index 44994ab07ea6b..9a317c5571c35 100644
--- a/llvm/test/CodeGen/X86/setcc-combine.ll
+++ b/llvm/test/CodeGen/X86/setcc-combine.ll
@@ -267,9 +267,9 @@ define void @test_i1_uge(i1 *%A2) {
 define i64 @PR40657(i8 %var2, i8 %var9) {
 ; CHECK-LABEL: PR40657:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    notb %sil
-; CHECK-NEXT:    addb %dil, %sil
-; CHECK-NEXT:    movzbl %sil, %eax
+; CHECK-NEXT:    addb %sil, %dil
+; CHECK-NEXT:    incb %dil
+; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    andl $1, %eax
 ; CHECK-NEXT:    retq
   %var6 = trunc i8 %var9 to i1

diff --git a/llvm/test/CodeGen/X86/xor-lea.ll b/llvm/test/CodeGen/X86/xor-lea.ll
index d0e3e1a522183..8e8ab5625b558 100644
--- a/llvm/test/CodeGen/X86/xor-lea.ll
+++ b/llvm/test/CodeGen/X86/xor-lea.ll
@@ -143,17 +143,15 @@ define i8 @xor_add_sminval_i8(i8 %x, i8 %y) {
 define i16 @xor_sub_sminval_i16(i16 %x) {
 ; X86-LABEL: xor_sub_sminval_i16:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    addl $-2, %eax
-; X86-NEXT:    xorl $32768, %eax # imm = 0x8000
+; X86-NEXT:    movl $32766, %eax # imm = 0x7FFE
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: xor_sub_sminval_i16:
 ; X64:       # %bb.0:
 ; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    leal -2(%rdi), %eax
-; X64-NEXT:    xorl $32768, %eax # imm = 0x8000
+; X64-NEXT:    leal 32766(%rdi), %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
   %s = sub i16 %x, 2
@@ -164,16 +162,14 @@ define i16 @xor_sub_sminval_i16(i16 %x) {
 define i32 @xor_add_sminval_i32(i32 %x) {
 ; X86-LABEL: xor_add_sminval_i32:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl $512, %eax # imm = 0x200
+; X86-NEXT:    movl $-2147483136, %eax # imm = 0x80000200
 ; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    addl $-2147483648, %eax # imm = 0x80000000
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: xor_add_sminval_i32:
 ; X64:       # %bb.0:
 ; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    leal 512(%rdi), %eax
-; X64-NEXT:    addl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT:    leal -2147483136(%rdi), %eax
 ; X64-NEXT:    retq
   %s = add i32 %x, 512
   %r = xor i32 %s, 2147483648
@@ -228,17 +224,15 @@ define i8 @sub_xor_sminval_i8(i8 %x, i8 %y) {
 define i16 @add_xor_sminval_i16(i16 %x) {
 ; X86-LABEL: add_xor_sminval_i16:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl $-32768, %eax # imm = 0x8000
-; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    addl $2, %eax
+; X86-NEXT:    movl $-32766, %eax # imm = 0x8002
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: add_xor_sminval_i16:
 ; X64:       # %bb.0:
 ; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    xorl $-32768, %edi # imm = 0x8000
-; X64-NEXT:    leal 2(%rdi), %eax
+; X64-NEXT:    leal -32766(%rdi), %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
   %r = xor i16 %x, 32768
@@ -249,16 +243,14 @@ define i16 @add_xor_sminval_i16(i16 %x) {
 define i32 @sub_xor_sminval_i32(i32 %x) {
 ; X86-LABEL: sub_xor_sminval_i32:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
-; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    addl $-512, %eax # imm = 0xFE00
+; X86-NEXT:    movl $2147483136, %eax # imm = 0x7FFFFE00
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sub_xor_sminval_i32:
 ; X64:       # %bb.0:
 ; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    movl $-512, %eax # imm = 0xFE00
-; X64-NEXT:    leal -2147483648(%rdi,%rax), %eax
+; X64-NEXT:    leal 2147483136(%rdi), %eax
 ; X64-NEXT:    retq
   %r = xor i32 %x, 2147483648
   %s = sub i32 %r, 512
@@ -269,16 +261,16 @@ define i64 @add_xor_sminval_i64(i64 %x, i64 %y) {
 ; X86-LABEL: add_xor_sminval_i64:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $-2147483648, %edx # imm = 0x80000000
-; X86-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    addl $-2147483648, %edx # imm = 0x80000000
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: add_xor_sminval_i64:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
-; X64-NEXT:    xorq %rdi, %rax
+; X64-NEXT:    addq %rdi, %rax
 ; X64-NEXT:    addq %rsi, %rax
 ; X64-NEXT:    retq
   %r = xor i64 %x, -9223372036854775808


        


More information about the llvm-commits mailing list