[llvm] c1ecd0a - [DAGCombiner] Add fold for `~x + x` -> `-1`

Noah Goldstein via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 6 18:31:28 PST 2023


Author: Noah Goldstein
Date: 2023-03-06T20:30:27-06:00
New Revision: c1ecd0a3f49a39295cde65334357be8fee2ecab9

URL: https://github.com/llvm/llvm-project/commit/c1ecd0a3f49a39295cde65334357be8fee2ecab9
DIFF: https://github.com/llvm/llvm-project/commit/c1ecd0a3f49a39295cde65334357be8fee2ecab9.diff

LOG: [DAGCombiner] Add fold for `~x + x` -> `-1`

This fold is generally done by InstCombine, but the pattern can still be
emitted as an intermediate step and is cheap to handle.
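
For reference, the identity holds because ~x flips every bit of x, so each
bit position of ~x + x adds 1 + 0 or 0 + 1 with no carries, producing the
all-ones value (-1 in two's complement). A minimal standalone check of the
identity (illustration only, not part of the patch):

  #include <cassert>
  #include <cstdint>

  int main() {
    // ~X + X is all-ones for any X: every bit position sums to 1 with no
    // carries, so the result is ~0, i.e. -1.
    for (uint64_t X : {UINT64_C(0), UINT64_C(1), UINT64_C(42), ~UINT64_C(0)})
      assert(~X + X == ~UINT64_C(0));
    return 0;
  }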

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D145177

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/X86/combine-add.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index bbec05c6706c..1d3ad4971748 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2612,6 +2612,10 @@ SDValue DAGCombiner::visitADDLike(SDNode *N) {
       !DAG.isConstantIntBuildVectorOrConstantInt(N1))
     return DAG.getNode(ISD::ADD, DL, VT, N1, N0);
 
+  if (areBitwiseNotOfEachother(N0, N1))
+    return DAG.getConstant(APInt::getAllOnes(VT.getScalarSizeInBits()),
+                           SDLoc(N), VT);
+
   // fold vector ops
   if (VT.isVector()) {
     if (SDValue FoldedVOp = SimplifyVBinOp(N, DL))

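The body of areBitwiseNotOfEachother is not visible in this hunk. As a rough
sketch only (an assumption based on the existing isBitwiseNot helper from
SelectionDAGNodes.h, in the context of DAGCombiner.cpp, and not necessarily
the patch's exact code), such a check would report whether either operand is
the bitwise NOT (XOR with an all-ones constant) of the other:

  // Sketch of the kind of check the new fold relies on: true when either
  // operand is an XOR of the other with all-ones.
  static bool areBitwiseNotOfEachother(SDValue Op0, SDValue Op1) {
    return (isBitwiseNot(Op0) && Op0.getOperand(0) == Op1) ||
           (isBitwiseNot(Op1) && Op1.getOperand(0) == Op0);
  }
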
diff --git a/llvm/test/CodeGen/X86/combine-add.ll b/llvm/test/CodeGen/X86/combine-add.ll
index e038d4672572..72b987d452f5 100644
--- a/llvm/test/CodeGen/X86/combine-add.ll
+++ b/llvm/test/CodeGen/X86/combine-add.ll
@@ -514,16 +514,12 @@ define i1 @PR51238(i1 %b, i8 %x, i8 %y, i8 %z) {
 define <2 x i64> @add_vec_x_notx(<2 x i64> %v0) nounwind {
 ; SSE-LABEL: add_vec_x_notx:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE-NEXT:    pxor %xmm0, %xmm1
-; SSE-NEXT:    paddq %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: add_vec_x_notx:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
   %y = add <2 x i64> %v0, %x
@@ -533,16 +529,12 @@ define <2 x i64> @add_vec_x_notx(<2 x i64> %v0) nounwind {
 define <2 x i64> @add_vec_notx_x(<2 x i64> %v0) nounwind {
 ; SSE-LABEL: add_vec_notx_x:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE-NEXT:    pxor %xmm0, %xmm1
-; SSE-NEXT:    paddq %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: add_vec_notx_x:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
   %y = add <2 x i64> %x, %v0
@@ -552,9 +544,7 @@ define <2 x i64> @add_vec_notx_x(<2 x i64> %v0) nounwind {
 define i64 @add_x_notx(i64 %v0) nounwind {
 ; CHECK-LABEL: add_x_notx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    notq %rax
-; CHECK-NEXT:    addq %rdi, %rax
+; CHECK-NEXT:    movq $-1, %rax
 ; CHECK-NEXT:    retq
   %x = xor i64 %v0, -1
   %y = add i64 %v0, %x
@@ -564,9 +554,7 @@ define i64 @add_x_notx(i64 %v0) nounwind {
 define i64 @add_notx_x(i64 %v0) nounwind {
 ; CHECK-LABEL: add_notx_x:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    notq %rax
-; CHECK-NEXT:    addq %rdi, %rax
+; CHECK-NEXT:    movq $-1, %rax
 ; CHECK-NEXT:    retq
   %x = xor i64 %v0, -1
   %y = add i64 %x, %v0


        

