[llvm] r263417 - AVX512: icmp operation should always be lowered to CMPM (AVX-512) instruction on SKX.

Igor Breger via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 14 03:26:39 PDT 2016


Author: ibreger
Date: Mon Mar 14 05:26:39 2016
New Revision: 263417

URL: http://llvm.org/viewvc/llvm-project?rev=263417&view=rev
Log:
AVX512: icmp operation should always be lowered to CMPM (AVX-512) instruction on SKX.

implemented by delena

Differential Revision: http://reviews.llvm.org/D18054

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/avx512-ext.ll
    llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=263417&r1=263416&r2=263417&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Mar 14 05:26:39 2016
@@ -1445,6 +1445,8 @@ X86TargetLowering::X86TargetLowering(con
       setOperationAction(ISD::UINT_TO_FP,       MVT::v4i32, Legal);
       setOperationAction(ISD::FP_TO_SINT,       MVT::v4i32, Legal);
       setOperationAction(ISD::FP_TO_UINT,       MVT::v4i32, Legal);
+      setOperationAction(ISD::ZERO_EXTEND,      MVT::v4i32, Custom);
+      setOperationAction(ISD::ZERO_EXTEND,      MVT::v2i64, Custom);
     }
     setOperationAction(ISD::TRUNCATE,           MVT::v8i1, Custom);
     setOperationAction(ISD::TRUNCATE,           MVT::v16i1, Custom);
@@ -1884,7 +1886,8 @@ X86TargetLowering::getPreferredVectorAct
   return TargetLoweringBase::getPreferredVectorAction(VT);
 }
 
-EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
+EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
+                                          LLVMContext& Context,
                                           EVT VT) const {
   if (!VT.isVector())
     return Subtarget.hasAVX512() ? MVT::i1: MVT::i8;
@@ -1892,7 +1895,7 @@ EVT X86TargetLowering::getSetCCResultTyp
   if (VT.isSimple()) {
     MVT VVT = VT.getSimpleVT();
     const unsigned NumElts = VVT.getVectorNumElements();
-    const MVT EltVT = VVT.getVectorElementType();
+    MVT EltVT = VVT.getVectorElementType();
     if (VVT.is512BitVector()) {
       if (Subtarget.hasAVX512())
         if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
@@ -1909,23 +1912,20 @@ EVT X86TargetLowering::getSetCCResultTyp
         }
     }
 
-    if (VVT.is256BitVector() || VVT.is128BitVector()) {
-      if (Subtarget.hasVLX())
-        if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
-            EltVT == MVT::f32 || EltVT == MVT::f64)
-          switch(NumElts) {
-          case 2: return MVT::v2i1;
-          case 4: return MVT::v4i1;
-          case 8: return MVT::v8i1;
-        }
-      if (Subtarget.hasBWI() && Subtarget.hasVLX())
-        if (EltVT == MVT::i8 || EltVT == MVT::i16)
-          switch(NumElts) {
-          case  8: return MVT::v8i1;
-          case 16: return MVT::v16i1;
-          case 32: return MVT::v32i1;
-        }
-    }
+    if (Subtarget.hasBWI() && Subtarget.hasVLX())
+      return MVT::getVectorVT(MVT::i1, NumElts);
+
+    if (!isTypeLegal(VT) && getTypeAction(Context, VT) == TypePromoteInteger) {
+      EVT LegalVT = getTypeToTransformTo(Context, VT);
+      EltVT = LegalVT.getVectorElementType().getSimpleVT();
+    }
+ 
+    if (Subtarget.hasVLX() && EltVT.getSizeInBits() >= 32)
+      switch(NumElts) {
+      case 2: return MVT::v2i1;
+      case 4: return MVT::v4i1;
+      case 8: return MVT::v8i1;
+      }
   }
 
   return VT.changeVectorElementTypeToInteger();
@@ -6750,15 +6750,16 @@ X86TargetLowering::LowerBUILD_VECTOR(SDV
       // it to i32 first.
       if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
         Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
-        if (VT.is256BitVector()) {
+        if (VT.getSizeInBits() >= 256) {
+          MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
           if (Subtarget.hasAVX()) {
-            Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v8i32, Item);
+            Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
             Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
           } else {
             // Without AVX, we need to extend to a 128-bit vector and then
             // insert into the 256-bit vector.
             Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
-            SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
+            SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl);
             Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
           }
         } else {

Modified: llvm/trunk/test/CodeGen/X86/avx512-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-ext.ll?rev=263417&r1=263416&r2=263417&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-ext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-ext.ll Mon Mar 14 05:26:39 2016
@@ -1900,6 +1900,67 @@ define <64 x i8> @zext_64xi1_to_64xi8(<6
   ret <64 x i8> %1
 }
 
+define <32 x i16> @zext_32xi1_to_32xi16(<32 x i16> %x, <32 x i16> %y) #0 {
+; KNL-LABEL: zext_32xi1_to_32xi16:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpcmpeqw %ymm2, %ymm0, %ymm0
+; KNL-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; KNL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; KNL-NEXT:    vpcmpeqw %ymm3, %ymm1, %ymm1
+; KNL-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: zext_32xi1_to_32xi16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpeqw %zmm1, %zmm0, %k1
+; SKX-NEXT:    vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %mask = icmp eq <32 x i16> %x, %y
+  %1 = zext <32 x i1> %mask to <32 x i16>
+  ret <32 x i16> %1
+}
+
+define <16 x i16> @zext_16xi1_to_16xi16(<16 x i16> %x, <16 x i16> %y) #0 {
+; KNL-LABEL: zext_16xi1_to_16xi16:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
+; KNL-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: zext_16xi1_to_16xi16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpeqw %ymm1, %ymm0, %k1
+; SKX-NEXT:    vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %mask = icmp eq <16 x i16> %x, %y
+  %1 = zext <16 x i1> %mask to <16 x i16>
+  ret <16 x i16> %1
+}
+
+
+define <32 x i8> @zext_32xi1_to_32xi8(<32 x i16> %x, <32 x i16> %y) #0 {
+; KNL-LABEL: zext_32xi1_to_32xi8:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpcmpeqw %ymm2, %ymm0, %ymm0
+; KNL-NEXT:    vpmovsxwd %ymm0, %zmm0
+; KNL-NEXT:    vpmovdb %zmm0, %xmm0
+; KNL-NEXT:    vpcmpeqw %ymm3, %ymm1, %ymm1
+; KNL-NEXT:    vpmovsxwd %ymm1, %zmm1
+; KNL-NEXT:    vpmovdb %zmm1, %xmm1
+; KNL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: zext_32xi1_to_32xi8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpeqw %zmm1, %zmm0, %k1
+; SKX-NEXT:    vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %mask = icmp eq <32 x i16> %x, %y
+  %1 = zext <32 x i1> %mask to <32 x i8>
+  ret <32 x i8> %1
+}
+
 define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
 ; KNL-LABEL: zext_4xi1_to_4x32:
 ; KNL:       ## BB#0:
@@ -1916,10 +1977,33 @@ define <4 x i32> @zext_4xi1_to_4x32(<4 x
 ; SKX-NEXT:    vmovdqa64 {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SKX-NEXT:    vpandq %xmm2, %xmm1, %xmm1
 ; SKX-NEXT:    vpandq %xmm2, %xmm0, %xmm0
-; SKX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; SKX-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; SKX-NEXT:    vpcmpeqd %xmm1, %xmm0, %k1
+; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
 ; SKX-NEXT:    retq
   %mask = icmp eq <4 x i8> %x, %y
   %1 = zext <4 x i1> %mask to <4 x i32>
   ret <4 x i32> %1
 }
+
+define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
+; KNL-LABEL: zext_2xi1_to_2xi64:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; KNL-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; KNL-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; KNL-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: zext_2xi1_to_2xi64:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vmovdqa64 {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SKX-NEXT:    vpandq %xmm2, %xmm1, %xmm1
+; SKX-NEXT:    vpandq %xmm2, %xmm0, %xmm0
+; SKX-NEXT:    vpcmpeqq %xmm1, %xmm0, %k1
+; SKX-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %mask = icmp eq <2 x i8> %x, %y
+  %1 = zext <2 x i1> %mask to <2 x i64>
+  ret <2 x i64> %1
+}

Modified: llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll?rev=263417&r1=263416&r2=263417&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll Mon Mar 14 05:26:39 2016
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=KNL
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=SKX
 
@@ -8,6 +9,13 @@ define <16 x float> @test1(<16 x float>
 ; KNL-NEXT:    vmovaps %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test1:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpleps %zmm1, %zmm0, %k1
+; SKX-NEXT:    vmovaps %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask = fcmp ole <16 x float> %x, %y
   %max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %y
   ret <16 x float> %max
@@ -20,6 +28,13 @@ define <8 x double> @test2(<8 x double>
 ; KNL-NEXT:    vmovapd %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test2:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmplepd %zmm1, %zmm0, %k1
+; SKX-NEXT:    vmovapd %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask = fcmp ole <8 x double> %x, %y
   %max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %y
   ret <8 x double> %max
@@ -32,6 +47,13 @@ define <16 x i32> @test3(<16 x i32> %x,
 ; KNL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test3:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpeqd (%rdi), %zmm0, %k1
+; SKX-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %y = load <16 x i32>, <16 x i32>* %yp, align 4
   %mask = icmp eq <16 x i32> %x, %y
   %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -45,6 +67,13 @@ define <16 x i32> @test4_unsigned(<16 x
 ; KNL-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test4_unsigned:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpnltud %zmm1, %zmm0, %k1
+; SKX-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask = icmp uge <16 x i32> %x, %y
   %max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
   ret <16 x i32> %max
@@ -57,6 +86,13 @@ define <8 x i64> @test5(<8 x i64> %x, <8
 ; KNL-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test5:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpeqq %zmm1, %zmm0, %k1
+; SKX-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask = icmp eq <8 x i64> %x, %y
   %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
   ret <8 x i64> %max
@@ -69,6 +105,13 @@ define <8 x i64> @test6_unsigned(<8 x i6
 ; KNL-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test6_unsigned:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpnleuq %zmm1, %zmm0, %k1
+; SKX-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask = icmp ugt <8 x i64> %x, %y
   %max = select <8 x i1> %mask, <8 x i64> %x1, <8 x i64> %y
   ret <8 x i64> %max
@@ -81,13 +124,14 @@ define <4 x float> @test7(<4 x float> %a
 ; KNL-NEXT:    vcmpltps %xmm2, %xmm0, %xmm2
 ; KNL-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
 ; KNL-NEXT:    retq
+;
 ; SKX-LABEL: test7:
 ; SKX:       ## BB#0:
-; SKX:    vxorps   %xmm2, %xmm2, %xmm2
-; SKX:    vcmpltps %xmm2, %xmm0, %k1 
-; SKX:    vmovaps  %xmm0, %xmm1 {%k1}
-; SKX:    vmovaps  %zmm1, %zmm0
-; SKX:    retq
+; SKX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vcmpltps %xmm2, %xmm0, %k1
+; SKX-NEXT:    vmovaps %xmm0, %xmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %mask = fcmp olt <4 x float> %a, zeroinitializer
   %c = select <4 x i1>%mask, <4 x float>%a, <4 x float>%b
@@ -101,13 +145,14 @@ define <2 x double> @test8(<2 x double>
 ; KNL-NEXT:    vcmpltpd %xmm2, %xmm0, %xmm2
 ; KNL-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; KNL-NEXT:    retq
+;
 ; SKX-LABEL: test8:
 ; SKX:       ## BB#0:
-; SKX: vxorpd  %xmm2, %xmm2, %xmm2
-; SKX: vcmpltpd    %xmm2, %xmm0, %k1 
-; SKX: vmovapd %xmm0, %xmm1 {%k1}
-; SKX: vmovaps %zmm1, %zmm0
-; SKX: retq
+; SKX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vcmpltpd %xmm2, %xmm0, %k1
+; SKX-NEXT:    vmovapd %xmm0, %xmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask = fcmp olt <2 x double> %a, zeroinitializer
   %c = select <2 x i1>%mask, <2 x double>%a, <2 x double>%b
   ret <2 x double>%c
@@ -119,6 +164,13 @@ define <8 x i32> @test9(<8 x i32> %x, <8
 ; KNL-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
 ; KNL-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test9:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpeqd %ymm1, %ymm0, %k1
+; SKX-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask = icmp eq <8 x i32> %x, %y
   %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
   ret <8 x i32> %max
@@ -130,12 +182,13 @@ define <8 x float> @test10(<8 x float> %
 ; KNL-NEXT:    vcmpeqps %zmm1, %zmm0, %k1
 ; KNL-NEXT:    vblendmps %zmm0, %zmm1, %zmm0 {%k1}
 ; KNL-NEXT:    retq
+;
 ; SKX-LABEL: test10:
 ; SKX:       ## BB#0:
-; SKX: vcmpeqps    %ymm1, %ymm0, %k1 
-; SKX: vmovaps %ymm0, %ymm1 {%k1}
-; SKX: vmovaps %zmm1, %zmm0
-; SKX: retq
+; SKX-NEXT:    vcmpeqps %ymm1, %ymm0, %k1
+; SKX-NEXT:    vmovaps %ymm0, %ymm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %mask = fcmp oeq <8 x float> %x, %y
   %max = select <8 x i1> %mask, <8 x float> %x, <8 x float> %y
@@ -147,6 +200,11 @@ define <8 x i32> @test11_unsigned(<8 x i
 ; KNL:       ## BB#0:
 ; KNL-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test11_unsigned:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
+; SKX-NEXT:    retq
   %mask = icmp ugt <8 x i32> %x, %y
   %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
   ret <8 x i32> %max
@@ -160,6 +218,14 @@ define i16 @test12(<16 x i64> %a, <16 x
 ; KNL-NEXT:    kunpckbw %k0, %k1, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test12:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpeqq %zmm2, %zmm0, %k0
+; SKX-NEXT:    vpcmpeqq %zmm3, %zmm1, %k1
+; SKX-NEXT:    kunpckbw %k0, %k1, %k0
+; SKX-NEXT:    kmovw %k0, %eax
+; SKX-NEXT:    retq
   %res = icmp eq <16 x i64> %a, %b
   %res1 = bitcast <16 x i1> %res to i16
   ret i16 %res1
@@ -197,6 +263,12 @@ define <16 x i32> @test13(<16 x float>%a
 ; KNL-NEXT:    vcmpeqps %zmm1, %zmm0, %k1
 ; KNL-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test13:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpeqps %zmm1, %zmm0, %k1
+; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; SKX-NEXT:    retq
 {
   %cmpvector_i = fcmp oeq <16 x float> %a, %b
   %conv = zext <16 x i1> %cmpvector_i to <16 x i32>
@@ -212,6 +284,15 @@ define <16 x i32> @test14(<16 x i32>%a,
 ; KNL-NEXT:    knotw %k0, %k1
 ; KNL-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test14:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpsubd %zmm1, %zmm0, %zmm1
+; SKX-NEXT:    vpcmpgtd %zmm0, %zmm1, %k0
+; SKX-NEXT:    knotw %k0, %k0
+; SKX-NEXT:    knotw %k0, %k1
+; SKX-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1} {z}
+; SKX-NEXT:    retq
   %sub_r = sub <16 x i32> %a, %b
   %cmp.i2.i = icmp sgt <16 x i32> %sub_r, %a
   %sext.i3.i = sext <16 x i1> %cmp.i2.i to <16 x i32>
@@ -229,6 +310,15 @@ define <8 x i64> @test15(<8 x i64>%a, <8
 ; KNL-NEXT:    knotw %k0, %k1
 ; KNL-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test15:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpsubq %zmm1, %zmm0, %zmm1
+; SKX-NEXT:    vpcmpgtq %zmm0, %zmm1, %k0
+; SKX-NEXT:    knotb %k0, %k0
+; SKX-NEXT:    knotw %k0, %k1
+; SKX-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1} {z}
+; SKX-NEXT:    retq
   %sub_r = sub <8 x i64> %a, %b
   %cmp.i2.i = icmp sgt <8 x i64> %sub_r, %a
   %sext.i3.i = sext <8 x i1> %cmp.i2.i to <8 x i64>
@@ -244,6 +334,13 @@ define <16 x i32> @test16(<16 x i32> %x,
 ; KNL-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpled %zmm0, %zmm1, %k1
+; SKX-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask = icmp sge <16 x i32> %x, %y
   %max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
   ret <16 x i32> %max
@@ -256,6 +353,13 @@ define <16 x i32> @test17(<16 x i32> %x,
 ; KNL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test17:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpgtd (%rdi), %zmm0, %k1
+; SKX-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
   %mask = icmp sgt <16 x i32> %x, %y
   %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -269,6 +373,13 @@ define <16 x i32> @test18(<16 x i32> %x,
 ; KNL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test18:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpled (%rdi), %zmm0, %k1
+; SKX-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
   %mask = icmp sle <16 x i32> %x, %y
   %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -282,6 +393,13 @@ define <16 x i32> @test19(<16 x i32> %x,
 ; KNL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test19:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpleud (%rdi), %zmm0, %k1
+; SKX-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
   %mask = icmp ule <16 x i32> %x, %y
   %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -296,6 +414,14 @@ define <16 x i32> @test20(<16 x i32> %x,
 ; KNL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test20:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
+; SKX-NEXT:    vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
+; SKX-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask1 = icmp eq <16 x i32> %x1, %y1
   %mask0 = icmp eq <16 x i32> %x, %y
   %mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
@@ -311,6 +437,14 @@ define <8 x i64> @test21(<8 x i64> %x, <
 ; KNL-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
 ; KNL-NEXT:    vmovaps %zmm2, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test21:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpleq %zmm1, %zmm0, %k1
+; SKX-NEXT:    vpcmpleq %zmm2, %zmm3, %k1 {%k1}
+; SKX-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; SKX-NEXT:    vmovaps %zmm2, %zmm0
+; SKX-NEXT:    retq
   %mask1 = icmp sge <8 x i64> %x1, %y1
   %mask0 = icmp sle <8 x i64> %x, %y
   %mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
@@ -326,6 +460,14 @@ define <8 x i64> @test22(<8 x i64> %x, <
 ; KNL-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test22:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpgtq %zmm2, %zmm1, %k1
+; SKX-NEXT:    vpcmpgtq (%rdi), %zmm0, %k1 {%k1}
+; SKX-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask1 = icmp sgt <8 x i64> %x1, %y1
   %y = load <8 x i64>, <8 x i64>* %y.ptr, align 4
   %mask0 = icmp sgt <8 x i64> %x, %y
@@ -342,6 +484,14 @@ define <16 x i32> @test23(<16 x i32> %x,
 ; KNL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test23:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpled %zmm1, %zmm2, %k1
+; SKX-NEXT:    vpcmpleud (%rdi), %zmm0, %k1 {%k1}
+; SKX-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask1 = icmp sge <16 x i32> %x1, %y1
   %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
   %mask0 = icmp ule <16 x i32> %x, %y
@@ -357,6 +507,13 @@ define <8 x i64> @test24(<8 x i64> %x, <
 ; KNL-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test24:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpeqq (%rdi){1to8}, %zmm0, %k1
+; SKX-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %yb = load i64, i64* %yb.ptr, align 4
   %y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
   %y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
@@ -372,6 +529,13 @@ define <16 x i32> @test25(<16 x i32> %x,
 ; KNL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test25:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpled (%rdi){1to16}, %zmm0, %k1
+; SKX-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %yb = load i32, i32* %yb.ptr, align 4
   %y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
   %y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
@@ -388,6 +552,14 @@ define <16 x i32> @test26(<16 x i32> %x,
 ; KNL-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test26:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpled %zmm1, %zmm2, %k1
+; SKX-NEXT:    vpcmpgtd (%rdi){1to16}, %zmm0, %k1 {%k1}
+; SKX-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask1 = icmp sge <16 x i32> %x1, %y1
   %yb = load i32, i32* %yb.ptr, align 4
   %y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
@@ -401,11 +573,19 @@ define <16 x i32> @test26(<16 x i32> %x,
 define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y1) nounwind {
 ; KNL-LABEL: test27:
 ; KNL:       ## BB#0:
-; KNL-NEXT:    vpcmpleq        %zmm1, %zmm2, %k1
-; KNL-NEXT:    vpcmpleq        (%rdi){1to8}, %zmm0, %k1 {%k1}
+; KNL-NEXT:    vpcmpleq %zmm1, %zmm2, %k1
+; KNL-NEXT:    vpcmpleq (%rdi){1to8}, %zmm0, %k1 {%k1}
 ; KNL-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL-NEXT:    retq
+;
+; SKX-LABEL: test27:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpleq %zmm1, %zmm2, %k1
+; SKX-NEXT:    vpcmpleq (%rdi){1to8}, %zmm0, %k1 {%k1}
+; SKX-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %mask1 = icmp sge <8 x i64> %x1, %y1
   %yb = load i64, i64* %yb.ptr, align 4
   %y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
@@ -416,11 +596,23 @@ define <8 x i64> @test27(<8 x i64> %x, i
   ret <8 x i64> %max
 }
 
-; KNL-LABEL: test28
-; KNL: vpcmpgtq
-; KNL: vpcmpgtq
-; KNL: kxnorw
 define <8 x i32>@test28(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1) {
+; KNL-LABEL: test28:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0
+; KNL-NEXT:    vpcmpgtq %zmm3, %zmm2, %k1
+; KNL-NEXT:    kxnorw %k1, %k0, %k1
+; KNL-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
+; KNL-NEXT:    vpmovqd %zmm0, %ymm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test28:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0
+; SKX-NEXT:    vpcmpgtq %zmm3, %zmm2, %k1
+; SKX-NEXT:    kxnorb %k1, %k0, %k0
+; SKX-NEXT:    vpmovm2d %k0, %ymm0
+; SKX-NEXT:    retq
   %x_gt_y = icmp sgt <8 x i64> %x, %y
   %x1_gt_y1 = icmp sgt <8 x i64> %x1, %y1
   %res = icmp eq <8 x i1>%x_gt_y, %x1_gt_y1
@@ -428,11 +620,23 @@ define <8 x i32>@test28(<8 x i64> %x, <8
   ret <8 x i32> %resse
 }
 
-; KNL-LABEL: test29
-; KNL: vpcmpgtd
-; KNL: vpcmpgtd
-; KNL: kxorw
 define <16 x i8>@test29(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32> %y1) {
+; KNL-LABEL: test29:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
+; KNL-NEXT:    vpcmpgtd %zmm3, %zmm2, %k1
+; KNL-NEXT:    kxorw %k1, %k0, %k1
+; KNL-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; KNL-NEXT:    vpmovdb %zmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test29:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
+; SKX-NEXT:    vpcmpgtd %zmm3, %zmm2, %k1
+; SKX-NEXT:    kxorw %k1, %k0, %k0
+; SKX-NEXT:    vpmovm2b %k0, %xmm0
+; SKX-NEXT:    retq
   %x_gt_y = icmp sgt <16 x i32> %x, %y
   %x1_gt_y1 = icmp sgt <16 x i32> %x1, %y1
   %res = icmp ne <16 x i1>%x_gt_y, %x1_gt_y1
@@ -441,9 +645,18 @@ define <16 x i8>@test29(<16 x i32> %x, <
 }
 
 define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind {
+; KNL-LABEL: test30:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm2
+; KNL-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; KNL-NEXT:    retq
+;
 ; SKX-LABEL: test30:
-; SKX: vcmpeqpd   %ymm1, %ymm0, %k1 
-; SKX: vmovapd    %ymm0, %ymm1 {%k1}
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpeqpd %ymm1, %ymm0, %k1
+; SKX-NEXT:    vmovapd %ymm0, %ymm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %mask = fcmp oeq <4 x double> %x, %y
   %max = select <4 x i1> %mask, <4 x double> %x, <4 x double> %y
@@ -451,9 +664,18 @@ define <4 x double> @test30(<4 x double>
 }
 
 define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp) nounwind {
-; SKX-LABEL: test31:     
-; SKX: vcmpltpd        (%rdi), %xmm0, %k1 
-; SKX: vmovapd %xmm0, %xmm1 {%k1}
+; KNL-LABEL: test31:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vcmpltpd (%rdi), %xmm0, %xmm2
+; KNL-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test31:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltpd (%rdi), %xmm0, %k1
+; SKX-NEXT:    vmovapd %xmm0, %xmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %y = load <2 x double>, <2 x double>* %yp, align 4
   %mask = fcmp olt <2 x double> %x, %y
@@ -462,9 +684,18 @@ define <2 x double> @test31(<2 x double>
 }
 
 define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp) nounwind {
+; KNL-LABEL: test32:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vcmpltpd (%rdi), %ymm0, %ymm2
+; KNL-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; KNL-NEXT:    retq
+;
 ; SKX-LABEL: test32:
-; SKX: vcmpltpd        (%rdi), %ymm0, %k1 
-; SKX: vmovapd %ymm0, %ymm1 {%k1}
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltpd (%rdi), %ymm0, %k1
+; SKX-NEXT:    vmovapd %ymm0, %ymm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %y = load <4 x double>, <4 x double>* %yp, align 4
   %mask = fcmp ogt <4 x double> %y, %x
@@ -473,9 +704,19 @@ define <4 x double> @test32(<4 x double>
 }
 
 define <8 x double> @test33(<8 x double> %x, <8 x double> %x1, <8 x double>* %yp) nounwind {
-; SKX-LABEL: test33:     
-; SKX: vcmpltpd        (%rdi), %zmm0, %k1 
-; SKX: vmovapd %zmm0, %zmm1 {%k1}
+; KNL-LABEL: test33:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vcmpltpd (%rdi), %zmm0, %k1
+; KNL-NEXT:    vmovapd %zmm0, %zmm1 {%k1}
+; KNL-NEXT:    vmovaps %zmm1, %zmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test33:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltpd (%rdi), %zmm0, %k1
+; SKX-NEXT:    vmovapd %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %y = load <8 x double>, <8 x double>* %yp, align 4
   %mask = fcmp olt <8 x double> %x, %y
   %max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %x1
@@ -483,9 +724,18 @@ define <8 x double> @test33(<8 x double>
 }
 
 define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) nounwind {
-; SKX-LABEL: test34:     
-; SKX: vcmpltps        (%rdi), %xmm0, %k1 
-; SKX: vmovaps %xmm0, %xmm1 {%k1}
+; KNL-LABEL: test34:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vcmpltps (%rdi), %xmm0, %xmm2
+; KNL-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test34:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltps (%rdi), %xmm0, %k1
+; SKX-NEXT:    vmovaps %xmm0, %xmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %y = load <4 x float>, <4 x float>* %yp, align 4
   %mask = fcmp olt <4 x float> %x, %y
   %max = select <4 x i1> %mask, <4 x float> %x, <4 x float> %x1
@@ -493,9 +743,19 @@ define <4 x float> @test34(<4 x float> %
 }
 
 define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) nounwind {
+; KNL-LABEL: test35:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vmovups (%rdi), %ymm2
+; KNL-NEXT:    vcmpltps %zmm2, %zmm0, %k1
+; KNL-NEXT:    vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT:    retq
+;
 ; SKX-LABEL: test35:
-; SKX: vcmpltps        (%rdi), %ymm0, %k1 
-; SKX: vmovaps %ymm0, %ymm1 {%k1}
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltps (%rdi), %ymm0, %k1
+; SKX-NEXT:    vmovaps %ymm0, %ymm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %y = load <8 x float>, <8 x float>* %yp, align 4
   %mask = fcmp ogt <8 x float> %y, %x
@@ -504,9 +764,19 @@ define <8 x float> @test35(<8 x float> %
 }
 
 define <16 x float> @test36(<16 x float> %x, <16 x float> %x1, <16 x float>* %yp) nounwind {
-; SKX-LABEL: test36:     
-; SKX: vcmpltps        (%rdi), %zmm0, %k1 
-; SKX: vmovaps %zmm0, %zmm1 {%k1}
+; KNL-LABEL: test36:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vcmpltps (%rdi), %zmm0, %k1
+; KNL-NEXT:    vmovaps %zmm0, %zmm1 {%k1}
+; KNL-NEXT:    vmovaps %zmm1, %zmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test36:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltps (%rdi), %zmm0, %k1
+; SKX-NEXT:    vmovaps %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
   %y = load <16 x float>, <16 x float>* %yp, align 4
   %mask = fcmp olt <16 x float> %x, %y
   %max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %x1
@@ -514,9 +784,19 @@ define <16 x float> @test36(<16 x float>
 }
 
 define <8 x double> @test37(<8 x double> %x, <8 x double> %x1, double* %ptr) nounwind {
-; SKX-LABEL: test37:                                
-; SKX: vcmpltpd  (%rdi){1to8}, %zmm0, %k1 
-; SKX: vmovapd %zmm0, %zmm1 {%k1}
+; KNL-LABEL: test37:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vcmpltpd (%rdi){1to8}, %zmm0, %k1
+; KNL-NEXT:    vmovapd %zmm0, %zmm1 {%k1}
+; KNL-NEXT:    vmovaps %zmm1, %zmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test37:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltpd (%rdi){1to8}, %zmm0, %k1
+; SKX-NEXT:    vmovapd %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %a = load double, double* %ptr
   %v = insertelement <8 x double> undef, double %a, i32 0
@@ -528,28 +808,48 @@ define <8 x double> @test37(<8 x double>
 }
 
 define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nounwind {
-; SKX-LABEL: test38:                                
-; SKX: vcmpltpd  (%rdi){1to4}, %ymm0, %k1 
-; SKX: vmovapd %ymm0, %ymm1 {%k1}
+; KNL-LABEL: test38:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vbroadcastsd (%rdi), %ymm2
+; KNL-NEXT:    vcmpltpd %ymm2, %ymm0, %ymm2
+; KNL-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test38:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltpd (%rdi){1to4}, %ymm0, %k1
+; SKX-NEXT:    vmovapd %ymm0, %ymm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %a = load double, double* %ptr
   %v = insertelement <4 x double> undef, double %a, i32 0
   %shuffle = shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> zeroinitializer
-  
+
   %mask = fcmp ogt <4 x double> %shuffle, %x
   %max = select <4 x i1> %mask, <4 x double> %x, <4 x double> %x1
   ret <4 x double> %max
 }
 
 define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nounwind {
-; SKX-LABEL: test39:                                
-; SKX: vcmpltpd  (%rdi){1to2}, %xmm0, %k1 
-; SKX: vmovapd %xmm0, %xmm1 {%k1}
+; KNL-LABEL: test39:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; KNL-NEXT:    vcmpltpd %xmm2, %xmm0, %xmm2
+; KNL-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test39:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltpd (%rdi){1to2}, %xmm0, %k1
+; SKX-NEXT:    vmovapd %xmm0, %xmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %a = load double, double* %ptr
   %v = insertelement <2 x double> undef, double %a, i32 0
   %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
-  
+
   %mask = fcmp ogt <2 x double> %shuffle, %x
   %max = select <2 x i1> %mask, <2 x double> %x, <2 x double> %x1
   ret <2 x double> %max
@@ -557,59 +857,170 @@ define <2 x double> @test39(<2 x double>
 
 
 define <16  x float> @test40(<16  x float> %x, <16  x float> %x1, float* %ptr) nounwind {
-; SKX-LABEL: test40:                                
-; SKX: vcmpltps  (%rdi){1to16}, %zmm0, %k1 
-; SKX: vmovaps %zmm0, %zmm1 {%k1}
+; KNL-LABEL: test40:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vcmpltps (%rdi){1to16}, %zmm0, %k1
+; KNL-NEXT:    vmovaps %zmm0, %zmm1 {%k1}
+; KNL-NEXT:    vmovaps %zmm1, %zmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test40:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltps (%rdi){1to16}, %zmm0, %k1
+; SKX-NEXT:    vmovaps %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %a = load float, float* %ptr
   %v = insertelement <16  x float> undef, float %a, i32 0
   %shuffle = shufflevector <16  x float> %v, <16  x float> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
-  
+
   %mask = fcmp ogt <16  x float> %shuffle, %x
   %max = select <16 x i1> %mask, <16  x float> %x, <16  x float> %x1
   ret <16  x float> %max
 }
 
 define <8  x float> @test41(<8  x float> %x, <8  x float> %x1, float* %ptr) nounwind {
-; SKX-LABEL: test41:                                
-; SKX: vcmpltps  (%rdi){1to8}, %ymm0, %k1 
-; SKX: vmovaps %ymm0, %ymm1 {%k1}
+; KNL-LABEL: test41:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vbroadcastss (%rdi), %ymm2
+; KNL-NEXT:    vcmpltps %zmm2, %zmm0, %k1
+; KNL-NEXT:    vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test41:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltps (%rdi){1to8}, %ymm0, %k1
+; SKX-NEXT:    vmovaps %ymm0, %ymm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %a = load float, float* %ptr
   %v = insertelement <8  x float> undef, float %a, i32 0
   %shuffle = shufflevector <8  x float> %v, <8  x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
-  
+
   %mask = fcmp ogt <8  x float> %shuffle, %x
   %max = select <8 x i1> %mask, <8  x float> %x, <8  x float> %x1
   ret <8  x float> %max
 }
 
 define <4  x float> @test42(<4  x float> %x, <4  x float> %x1, float* %ptr) nounwind {
-; SKX-LABEL: test42:                                
-; SKX: vcmpltps  (%rdi){1to4}, %xmm0, %k1 
-; SKX: vmovaps %xmm0, %xmm1 {%k1}
-  
+; KNL-LABEL: test42:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vbroadcastss (%rdi), %xmm2
+; KNL-NEXT:    vcmpltps %xmm2, %xmm0, %xmm2
+; KNL-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test42:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpltps (%rdi){1to4}, %xmm0, %k1
+; SKX-NEXT:    vmovaps %xmm0, %xmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
+
   %a = load float, float* %ptr
   %v = insertelement <4  x float> undef, float %a, i32 0
   %shuffle = shufflevector <4  x float> %v, <4  x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
-  
+
   %mask = fcmp ogt <4  x float> %shuffle, %x
   %max = select <4 x i1> %mask, <4  x float> %x, <4  x float> %x1
   ret <4  x float> %max
 }
 
 define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x i1> %mask_in) nounwind {
-; SKX-LABEL: test43:                                
-; SKX: vpmovw2m  %xmm2, %k1
-; SKX: vcmpltpd  (%rdi){1to8}, %zmm0, %k1 {%k1}
-; SKX: vmovapd %zmm0, %zmm1 {%k1}
+; KNL-LABEL: test43:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpmovsxwq %xmm2, %zmm2
+; KNL-NEXT:    vpsllq $63, %zmm2, %zmm2
+; KNL-NEXT:    vptestmq %zmm2, %zmm2, %k1
+; KNL-NEXT:    vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
+; KNL-NEXT:    vmovapd %zmm0, %zmm1 {%k1}
+; KNL-NEXT:    vmovaps %zmm1, %zmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test43:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpsllw $15, %xmm2, %xmm2
+; SKX-NEXT:    vpmovw2m %xmm2, %k1
+; SKX-NEXT:    vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
+; SKX-NEXT:    vmovapd %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
 
   %a = load double, double* %ptr
   %v = insertelement <8 x double> undef, double %a, i32 0
   %shuffle = shufflevector <8 x double> %v, <8 x double> undef, <8 x i32> zeroinitializer
-  
+
   %mask_cmp = fcmp ogt <8 x double> %shuffle, %x
   %mask = and <8 x i1> %mask_cmp, %mask_in
   %max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %x1
   ret <8 x double> %max
 }
+
+define <4 x i32> @test44(<4 x i16> %x, <4 x i16> %y) #0 {
+; KNL-LABEL: test44:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; KNL-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; KNL-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; KNL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test44:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; SKX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; SKX-NEXT:    vpcmpeqd %xmm1, %xmm0, %k0
+; SKX-NEXT:    vpmovm2d %k0, %xmm0
+; SKX-NEXT:    retq
+  %mask = icmp eq <4 x i16> %x, %y
+  %1 = sext <4 x i1> %mask to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <2 x i64> @test45(<2 x i16> %x, <2 x i16> %y) #0 {
+; KNL-LABEL: test45:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; KNL-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
+; KNL-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
+; KNL-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test45:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
+; SKX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
+; SKX-NEXT:    vpcmpeqq %xmm1, %xmm0, %k1
+; SKX-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %mask = icmp eq <2 x i16> %x, %y
+  %1 = zext <2 x i1> %mask to <2 x i64>
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @test46(<2 x float> %x, <2 x float> %y) #0 {
+; KNL-LABEL: test46:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm0
+; KNL-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; KNL-NEXT:    vpsllq $32, %xmm0, %xmm0
+; KNL-NEXT:    vpsrad $31, %xmm0, %xmm1
+; KNL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; KNL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test46:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vcmpeqps %xmm1, %xmm0, %k1
+; SKX-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %mask = fcmp oeq <2 x float> %x, %y
+  %1 = zext <2 x i1> %mask to <2 x i64>
+  ret <2 x i64> %1
+}




More information about the llvm-commits mailing list