[llvm] r288635 - [AVX-512] Add avx512f command lines to fast isel SSE select test.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Dec 4 20:51:28 PST 2016


Author: ctopper
Date: Sun Dec  4 22:51:28 2016
New Revision: 288635

URL: http://llvm.org/viewvc/llvm-project?rev=288635&view=rev
Log:
[AVX-512] Add avx512f command lines to fast isel SSE select test.

Currently the fast-isel code emits an AVX1 instruction sequence even with AVX-512 enabled. This is different from normal isel. A follow-up commit will fix this.
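
For reference, the f32 oeq case below shows the difference the new check prefixes capture. With -mattr=avx512f, normal isel (AVX512SLOW) selects a mask-register sequence, while fast isel (AVX512FAST) still falls back to the AVX1-style compare-and-blend:

    AVX512SLOW (normal isel):
      vcmpeqss %xmm1, %xmm0, %k1
      vmovss %xmm2, %xmm0, %xmm3 {%k1}
      vmovaps %xmm3, %xmm0
      retq

    AVX512FAST (fast isel, currently identical to the AVX sequence):
      vcmpeqss %xmm1, %xmm0, %xmm0
      vblendvps %xmm0, %xmm2, %xmm3, %xmm0
      retq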

Modified:
    llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll?rev=288635&r1=288634&r2=288635&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll Sun Dec  4 22:51:28 2016
@@ -3,6 +3,8 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1            | FileCheck %s --check-prefix=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown                               -mattr=avx | FileCheck %s --check-prefix=AVX
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 -mattr=avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown                               -mattr=avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 -mattr=avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512FAST
 
 ; Test all cmp predicates that can be used with SSE.
 
@@ -21,6 +23,19 @@ define float @select_fcmp_oeq_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_oeq_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpeqss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_oeq_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp oeq float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -41,6 +56,19 @@ define double @select_fcmp_oeq_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_oeq_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpeqsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_oeq_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpeqsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp oeq double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -62,6 +90,19 @@ define float @select_fcmp_ogt_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ogt_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpltss %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ogt_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpltss %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ogt float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -83,6 +124,19 @@ define double @select_fcmp_ogt_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ogt_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpltsd %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ogt_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpltsd %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ogt double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -104,6 +158,19 @@ define float @select_fcmp_oge_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_oge_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpless %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_oge_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpless %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp oge float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -125,6 +192,19 @@ define double @select_fcmp_oge_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_oge_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmplesd %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_oge_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp oge double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -145,6 +225,19 @@ define float @select_fcmp_olt_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_olt_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpltss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_olt_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpltss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp olt float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -165,6 +258,19 @@ define double @select_fcmp_olt_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_olt_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpltsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_olt_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpltsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp olt double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -185,6 +291,19 @@ define float @select_fcmp_ole_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ole_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpless %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ole_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpless %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ole float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -205,6 +324,19 @@ define double @select_fcmp_ole_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ole_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmplesd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ole_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmplesd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ole double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -225,6 +357,19 @@ define float @select_fcmp_ord_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ord_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpordss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ord_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpordss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ord float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -245,6 +390,19 @@ define double @select_fcmp_ord_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ord_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpordsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ord_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpordsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ord double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -265,6 +423,19 @@ define float @select_fcmp_uno_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_uno_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpunordss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_uno_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpunordss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp uno float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -285,6 +456,19 @@ define double @select_fcmp_uno_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_uno_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpunordsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_uno_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpunordsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp uno double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -305,6 +489,19 @@ define float @select_fcmp_ugt_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ugt_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpnless %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ugt_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpnless %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ugt float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -325,6 +522,19 @@ define double @select_fcmp_ugt_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ugt_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpnlesd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ugt_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpnlesd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ugt double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -345,6 +555,19 @@ define float @select_fcmp_uge_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_uge_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpnltss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_uge_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpnltss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp uge float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -365,6 +588,19 @@ define double @select_fcmp_uge_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_uge_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpnltsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_uge_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpnltsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp uge double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -386,6 +622,19 @@ define float @select_fcmp_ult_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ult_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpnless %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ult_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpnless %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ult float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -407,6 +656,19 @@ define double @select_fcmp_ult_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ult_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpnlesd %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ult_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpnlesd %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ult double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -428,6 +690,19 @@ define float @select_fcmp_ule_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ule_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpnltss %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ule_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpnltss %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ule float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -449,6 +724,19 @@ define double @select_fcmp_ule_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ule_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpnltsd %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_ule_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpnltsd %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp ule double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -469,6 +757,19 @@ define float @select_fcmp_une_f32(float
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_une_f32:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpneqss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_une_f32:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpneqss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp une float %a, %b
   %2 = select i1 %1, float %c, float %d
   ret float %2
@@ -489,6 +790,19 @@ define double @select_fcmp_une_f64(doubl
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_une_f64:
+; AVX512SLOW:       # BB#0:
+; AVX512SLOW-NEXT:    vcmpneqsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT:    vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT:    retq
+;
+; AVX512FAST-LABEL: select_fcmp_une_f64:
+; AVX512FAST:       # BB#0:
+; AVX512FAST-NEXT:    vcmpneqsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT:    retq
+;
   %1 = fcmp une double %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
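
To exercise just the new fast-isel AVX-512 configuration by hand (a sketch; it assumes the command is run from llvm/trunk with the freshly built llc and FileCheck on PATH), the added RUN line expands to:

    llc < test/CodeGen/X86/fast-isel-select-sse.ll -mtriple=x86_64-unknown-unknown \
        -fast-isel -fast-isel-abort=1 -mattr=avx512f \
      | FileCheck test/CodeGen/X86/fast-isel-select-sse.ll \
        --check-prefix=AVX512 --check-prefix=AVX512FAST

Dropping -fast-isel -fast-isel-abort=1 and switching the second prefix to AVX512SLOW checks the normal-isel output instead.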