[llvm] r316947 - [SelectionDAG] Add VSELECT demanded elts support to computeKnownBits
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 30 12:31:08 PDT 2017
Author: rksimon
Date: Mon Oct 30 12:31:08 2017
New Revision: 316947
URL: http://llvm.org/viewvc/llvm-project?rev=316947&view=rev
Log:
[SelectionDAG] Add VSELECT demanded elts support to computeKnownBits
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=316947&r1=316946&r2=316947&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Mon Oct 30 12:31:08 2017
@@ -2417,22 +2417,22 @@ void SelectionDAG::computeKnownBits(SDVa
}
case ISD::SELECT:
case ISD::VSELECT:
- computeKnownBits(Op.getOperand(2), Known, Depth+1);
+ computeKnownBits(Op.getOperand(2), Known, DemandedElts, Depth+1);
// If we don't know any bits, early out.
if (Known.isUnknown())
break;
- computeKnownBits(Op.getOperand(1), Known2, Depth+1);
+ computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth+1);
// Only known if known in both the LHS and RHS.
Known.One &= Known2.One;
Known.Zero &= Known2.Zero;
break;
case ISD::SELECT_CC:
- computeKnownBits(Op.getOperand(3), Known, Depth+1);
+ computeKnownBits(Op.getOperand(3), Known, DemandedElts, Depth+1);
// If we don't know any bits, early out.
if (Known.isUnknown())
break;
- computeKnownBits(Op.getOperand(2), Known2, Depth+1);
+ computeKnownBits(Op.getOperand(2), Known2, DemandedElts, Depth+1);
// Only known if known in both the LHS and RHS.
Known.One &= Known2.One;
Known.Zero &= Known2.Zero;
break;
Modified: llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-bits-vector.ll?rev=316947&r1=316946&r2=316947&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-bits-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/known-bits-vector.ll Mon Oct 30 12:31:08 2017
@@ -618,11 +618,7 @@ define <4 x float> @knownbits_and_select
; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT: vpsrld $16, %xmm0, %xmm0
-; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popl %ebp
; X32-NEXT: retl
@@ -634,11 +630,7 @@ define <4 x float> @knownbits_and_select
; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X64-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT: vpsrld $16, %xmm0, %xmm0
-; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a2, <i32 65535, i32 -1, i32 255, i32 -1>
%2 = and <4 x i32> %a3, <i32 255, i32 -1, i32 65535, i32 -1>
@@ -664,11 +656,7 @@ define <4 x float> @knownbits_lshr_and_s
; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT: vpsrld $16, %xmm0, %xmm0
-; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popl %ebp
; X32-NEXT: retl
@@ -682,11 +670,7 @@ define <4 x float> @knownbits_lshr_and_s
; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X64-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT: vpsrld $16, %xmm0, %xmm0
-; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-NEXT: retq
%1 = lshr <4 x i32> %a2, <i32 5, i32 1, i32 5, i32 1>
%2 = and <4 x i32> %a3, <i32 255, i32 -1, i32 65535, i32 -1>
More information about the llvm-commits
mailing list