[llvm] r316944 - [SelectionDAG] Add VSELECT support to computeKnownBits

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 30 12:08:22 PDT 2017


Author: rksimon
Date: Mon Oct 30 12:08:21 2017
New Revision: 316944

URL: http://llvm.org/viewvc/llvm-project?rev=316944&view=rev
Log:
[SelectionDAG] Add VSELECT support to computeKnownBits

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
    llvm/trunk/test/CodeGen/X86/avx512-schedule.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=316944&r1=316943&r2=316944&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Mon Oct 30 12:08:21 2017
@@ -2416,6 +2416,7 @@ void SelectionDAG::computeKnownBits(SDVa
     break;
   }
   case ISD::SELECT:
+  case ISD::VSELECT:
     computeKnownBits(Op.getOperand(2), Known, Depth+1);
     // If we don't know any bits, early out.
     if (Known.isUnknown())

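For context, the hunk above only shows the head of the case: after this
patch ISD::VSELECT simply falls through into the existing ISD::SELECT
handling. A minimal sketch of the whole case as it reads post-patch,
assuming the 2017-era KnownBits API (Zero/One APInt members plus
isUnknown()) and trimmed for illustration rather than quoted verbatim:

    case ISD::SELECT:
    case ISD::VSELECT:
      // Start from the known bits of the false operand.
      computeKnownBits(Op.getOperand(2), Known, Depth+1);
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
      // Intersect with the known bits of the true operand:
      // only bits known in BOTH select arms remain known.
      computeKnownBits(Op.getOperand(1), Known2, Depth+1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
      break;
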
Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt.ll?rev=316944&r1=316943&r2=316944&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt.ll Mon Oct 30 12:08:21 2017
@@ -1829,7 +1829,7 @@ define <16 x float> @ubto16f32(<16 x i32
 ; ALL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; ALL-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
-; ALL-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; ALL-NEXT:    vcvtdq2ps %zmm0, %zmm0
 ; ALL-NEXT:    retq
   %mask = icmp slt <16 x i32> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x float>
@@ -1857,10 +1857,10 @@ define <16 x double> @ubto16f64(<16 x i3
 ; VL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; VL-NEXT:    movl {{.*}}(%rip), %eax
 ; VL-NEXT:    vpbroadcastd %eax, %ymm0 {%k1} {z}
-; VL-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; VL-NEXT:    vcvtdq2pd %ymm0, %zmm0
 ; VL-NEXT:    kshiftrw $8, %k1, %k1
 ; VL-NEXT:    vpbroadcastd %eax, %ymm1 {%k1} {z}
-; VL-NEXT:    vcvtudq2pd %ymm1, %zmm1
+; VL-NEXT:    vcvtdq2pd %ymm1, %zmm1
 ; VL-NEXT:    retq
   %mask = icmp slt <16 x i32> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x double>
@@ -1884,7 +1884,7 @@ define <8 x float> @ubto8f32(<8 x i32> %
 ; VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VL-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
 ; VL-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
-; VL-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; VL-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; VL-NEXT:    retq
   %mask = icmp slt <8 x i32> %a, zeroinitializer
   %1 = uitofp <8 x i1> %mask to <8 x float>
@@ -1907,7 +1907,7 @@ define <8 x double> @ubto8f64(<8 x i32>
 ; VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VL-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
 ; VL-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
-; VL-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; VL-NEXT:    vcvtdq2pd %ymm0, %zmm0
 ; VL-NEXT:    retq
   %mask = icmp slt <8 x i32> %a, zeroinitializer
   %1 = uitofp <8 x i1> %mask to <8 x double>
@@ -1928,7 +1928,7 @@ define <4 x float> @ubto4f32(<4 x i32> %
 ; VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VL-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
 ; VL-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
-; VL-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; VL-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; VL-NEXT:    retq
   %mask = icmp slt <4 x i32> %a, zeroinitializer
   %1 = uitofp <4 x i1> %mask to <4 x float>
@@ -1949,7 +1949,7 @@ define <4 x double> @ubto4f64(<4 x i32>
 ; VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; VL-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
 ; VL-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
-; VL-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; VL-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; VL-NEXT:    retq
   %mask = icmp slt <4 x i32> %a, zeroinitializer
   %1 = uitofp <4 x i1> %mask to <4 x double>
@@ -1977,7 +1977,7 @@ define <2 x float> @ubto2f32(<2 x i32> %
 ; VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; VL-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
 ; VL-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
-; VL-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; VL-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; VL-NEXT:    retq
   %mask = icmp ult <2 x i32> %a, zeroinitializer
   %1 = uitofp <2 x i1> %mask to <2 x float>
@@ -1999,7 +1999,7 @@ define <2 x double> @ubto2f64(<2 x i32>
 ; VLDQ-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; VLDQ-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
 ; VLDQ-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
-; VLDQ-NEXT:    vcvtuqq2pd %xmm0, %xmm0
+; VLDQ-NEXT:    vcvtqq2pd %xmm0, %xmm0
 ; VLDQ-NEXT:    retq
 ;
 ; VLNODQ-LABEL: ubto2f64:
@@ -2009,9 +2009,9 @@ define <2 x double> @ubto2f64(<2 x i32>
 ; VLNODQ-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
 ; VLNODQ-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
 ; VLNODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; VLNODQ-NEXT:    vcvtusi2sdq %rax, %xmm2, %xmm1
+; VLNODQ-NEXT:    vcvtsi2sdl %eax, %xmm2, %xmm1
 ; VLNODQ-NEXT:    vmovq %xmm0, %rax
-; VLNODQ-NEXT:    vcvtusi2sdq %rax, %xmm2, %xmm0
+; VLNODQ-NEXT:    vcvtsi2sdl %eax, %xmm2, %xmm0
 ; VLNODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; VLNODQ-NEXT:    retq
   %mask = icmp ult <2 x i32> %a, zeroinitializer

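The test churn above is a direct consequence: each zero-masked
({%k1} {z}) broadcast or load of the constant 1 lowers to a VSELECT
between splat(1) and splat(0), so computeKnownBits can now prove that
every element has all bits except bit 0 known zero. A provably
non-negative integer converts to floating point identically under signed
and unsigned semantics, so the AVX-512-only unsigned conversions
(vcvtudq2ps, vcvtudq2pd, vcvtuqq2pd, vcvtusi2sdq) relax to their signed
counterparts. A self-contained mini-model of the known-bits intersection
for one 32-bit lane (the Known struct here is illustrative, not the real
llvm::KnownBits):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for llvm::KnownBits on a 32-bit element: a bit of
    // Zero/One is set when that bit is known to be 0/1.
    struct Known { uint32_t Zero, One; };

    int main() {
      Known TrueVal  = { ~1u, 1u };  // splatted constant 1
      Known FalseVal = { ~0u, 0u };  // zeroed (masked-off) lane
      // Selecting between them keeps only bits known in BOTH arms.
      Known Sel = { TrueVal.Zero & FalseVal.Zero,
                    TrueVal.One  & FalseVal.One };
      // Bits 31..1 are known zero, so the sign bit is clear and an
      // unsigned->float conversion equals the signed one.
      printf("known-zero mask: 0x%08x\n", Sel.Zero); // 0xfffffffe
      return 0;
    }

The avx512-schedule.ll changes below are the same substitution in the
scheduling variants of these tests.
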
Modified: llvm/trunk/test/CodeGen/X86/avx512-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-schedule.ll?rev=316944&r1=316943&r2=316944&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-schedule.ll Mon Oct 30 12:08:21 2017
@@ -2832,7 +2832,7 @@ define <16 x float> @ubto16f32(<16 x i32
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
-; GENERIC-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; GENERIC-NEXT:    vcvtdq2ps %zmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto16f32:
@@ -2840,7 +2840,7 @@ define <16 x float> @ubto16f32(<16 x i32
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
-; SKX-NEXT:    vcvtudq2ps %zmm0, %zmm0 # sched: [4:0.33]
+; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %mask = icmp slt <16 x i32> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x float>
@@ -2854,10 +2854,10 @@ define <16 x double> @ubto16f64(<16 x i3
 ; GENERIC-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; GENERIC-NEXT:    movl {{.*}}(%rip), %eax # sched: [5:0.50]
 ; GENERIC-NEXT:    vpbroadcastd %eax, %ymm0 {%k1} {z}
-; GENERIC-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm0
 ; GENERIC-NEXT:    kshiftrw $8, %k1, %k1
 ; GENERIC-NEXT:    vpbroadcastd %eax, %ymm1 {%k1} {z}
-; GENERIC-NEXT:    vcvtudq2pd %ymm1, %zmm1
+; GENERIC-NEXT:    vcvtdq2pd %ymm1, %zmm1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto16f64:
@@ -2866,10 +2866,10 @@ define <16 x double> @ubto16f64(<16 x i3
 ; SKX-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    movl {{.*}}(%rip), %eax # sched: [5:0.50]
 ; SKX-NEXT:    vpbroadcastd %eax, %ymm0 {%k1} {z} # sched: [3:1.00]
-; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm0 # sched: [7:1.00]
+; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    kshiftrw $8, %k1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd %eax, %ymm1 {%k1} {z} # sched: [3:1.00]
-; SKX-NEXT:    vcvtudq2pd %ymm1, %zmm1 # sched: [7:1.00]
+; SKX-NEXT:    vcvtdq2pd %ymm1, %zmm1 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %mask = icmp slt <16 x i32> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x double>
@@ -2882,7 +2882,7 @@ define <8 x float> @ubto8f32(<8 x i32> %
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
-; GENERIC-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; GENERIC-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto8f32:
@@ -2890,7 +2890,7 @@ define <8 x float> @ubto8f32(<8 x i32> %
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
-; SKX-NEXT:    vcvtudq2ps %ymm0, %ymm0 # sched: [4:0.33]
+; SKX-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %mask = icmp slt <8 x i32> %a, zeroinitializer
   %1 = uitofp <8 x i1> %mask to <8 x float>
@@ -2903,7 +2903,7 @@ define <8 x double> @ubto8f64(<8 x i32>
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
-; GENERIC-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto8f64:
@@ -2911,7 +2911,7 @@ define <8 x double> @ubto8f64(<8 x i32>
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
-; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm0 # sched: [7:1.00]
+; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %mask = icmp slt <8 x i32> %a, zeroinitializer
   %1 = uitofp <8 x i1> %mask to <8 x double>
@@ -2924,7 +2924,7 @@ define <4 x float> @ubto4f32(<4 x i32> %
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
-; GENERIC-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; GENERIC-NEXT:    vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto4f32:
@@ -2932,7 +2932,7 @@ define <4 x float> @ubto4f32(<4 x i32> %
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
-; SKX-NEXT:    vcvtudq2ps %xmm0, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vcvtdq2ps %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %mask = icmp slt <4 x i32> %a, zeroinitializer
   %1 = uitofp <4 x i1> %mask to <4 x float>
@@ -2945,7 +2945,7 @@ define <4 x double> @ubto4f64(<4 x i32>
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
-; GENERIC-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; GENERIC-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto4f64:
@@ -2953,7 +2953,7 @@ define <4 x double> @ubto4f64(<4 x i32>
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
-; SKX-NEXT:    vcvtudq2pd %xmm0, %ymm0 # sched: [7:1.00]
+; SKX-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %mask = icmp slt <4 x i32> %a, zeroinitializer
   %1 = uitofp <4 x i1> %mask to <4 x double>
@@ -2967,7 +2967,7 @@ define <2 x float> @ubto2f32(<2 x i32> %
 ; GENERIC-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.50]
 ; GENERIC-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
-; GENERIC-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; GENERIC-NEXT:    vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto2f32:
@@ -2976,7 +2976,7 @@ define <2 x float> @ubto2f32(<2 x i32> %
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.33]
 ; SKX-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
-; SKX-NEXT:    vcvtudq2ps %xmm0, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vcvtdq2ps %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %mask = icmp ult <2 x i32> %a, zeroinitializer
   %1 = uitofp <2 x i1> %mask to <2 x float>
@@ -2990,7 +2990,7 @@ define <2 x double> @ubto2f64(<2 x i32>
 ; GENERIC-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.50]
 ; GENERIC-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
 ; GENERIC-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [4:0.50]
-; GENERIC-NEXT:    vcvtuqq2pd %xmm0, %xmm0
+; GENERIC-NEXT:    vcvtqq2pd %xmm0, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto2f64:
@@ -2999,7 +2999,7 @@ define <2 x double> @ubto2f64(<2 x i32>
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.33]
 ; SKX-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
-; SKX-NEXT:    vcvtuqq2pd %xmm0, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vcvtqq2pd %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %mask = icmp ult <2 x i32> %a, zeroinitializer
   %1 = uitofp <2 x i1> %mask to <2 x double>
