[llvm] r284424 - [DAG] use isConstOrConstSplat in ComputeNumSignBits to optimize SRA

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 17 13:41:40 PDT 2016


Author: spatel
Date: Mon Oct 17 15:41:39 2016
New Revision: 284424

URL: http://llvm.org/viewvc/llvm-project?rev=284424&view=rev
Log:
[DAG] use isConstOrConstSplat in ComputeNumSignBits to optimize SRA

The scalar version of this pattern was noted in https://reviews.llvm.org/D25485
and fixed with https://reviews.llvm.org/rL284395.

More refactoring of the constant/splat helpers is needed and will happen in follow-up patches.

Differential Revision: https://reviews.llvm.org/D25685
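
For context, a minimal IR sketch of the vector pattern this enables (it mirrors
the all_sign_bit_ashr_vec test updated below; the function name and per-lane
shift amounts here are illustrative, not copied from the test). Each lane of
the negation is either 0 or -1, so all 32 bits of every lane are sign bits and
the ashr is a no-op:

  define <4 x i32> @neg_bool_ashr_vec(<4 x i32> %x) {
    %and = and <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>      ; each lane: 0 or 1
    %sub = sub <4 x i32> zeroinitializer, %and                 ; each lane: 0 or -1
    %sar = ashr <4 x i32> %sub, <i32 1, i32 5, i32 7, i32 31>  ; no-op: every bit is a sign bit
    ret <4 x i32> %sar
  }

With this change, ComputeNumSignBits reports the full bit width for the vector
negation, so the shift is eliminated and only the negation survives in the
checked asm below (pxor/psubd/movdqa).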

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/trunk/test/CodeGen/X86/sar_fold64.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=284424&r1=284423&r2=284424&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Mon Oct 17 15:41:39 2016
@@ -2697,7 +2697,7 @@ unsigned SelectionDAG::ComputeNumSignBit
     if (Tmp2 == 1) return 1;
 
     // Handle NEG.
-    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
+    if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
       if (CLHS->isNullValue()) {
         APInt KnownZero, KnownOne;
         computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);

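The functional difference in this hunk: dyn_cast<ConstantSDNode> only matches a
scalar constant LHS, so the NEG special case never fired for vectors, while
isConstOrConstSplat also looks through a BUILD_VECTOR whose lanes are all the
same constant and returns that lane's ConstantSDNode. A sketch of the two
shapes of NEG (function names are illustrative):

  ; The zero LHS here is a plain ConstantSDNode in the DAG:
  define i32 @neg_scalar(i32 %a) {
    %neg = sub i32 0, %a
    ret i32 %neg
  }

  ; Here the zero LHS typically becomes a splat BUILD_VECTOR, which
  ; dyn_cast<ConstantSDNode> misses but isConstOrConstSplat matches:
  define <4 x i32> @neg_vector(<4 x i32> %v) {
    %neg = sub <4 x i32> zeroinitializer, %v
    ret <4 x i32> %neg
  }
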
Modified: llvm/trunk/test/CodeGen/X86/sar_fold64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sar_fold64.ll?rev=284424&r1=284423&r2=284424&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sar_fold64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sar_fold64.ll Mon Oct 17 15:41:39 2016
@@ -80,16 +80,6 @@ define <4 x i32> @all_sign_bit_ashr_vec(
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    psubd %xmm0, %xmm1
 ; CHECK-NEXT:    movdqa %xmm1, %xmm0
-; CHECK-NEXT:    psrad $31, %xmm0
-; CHECK-NEXT:    movdqa %xmm1, %xmm2
-; CHECK-NEXT:    movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
-; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; CHECK-NEXT:    movdqa %xmm1, %xmm0
-; CHECK-NEXT:    psrad $5, %xmm0
-; CHECK-NEXT:    psrad $1, %xmm1
-; CHECK-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; CHECK-NEXT:    retq
 ;
  %and = and <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
