[llvm] r294462 - [x86] add AVX512vl target for more coverage; NFC

Sanjay Patel via llvm-commits <llvm-commits at lists.llvm.org>
Wed Feb 8 07:22:52 PST 2017


Author: spatel
Date: Wed Feb  8 09:22:52 2017
New Revision: 294462

URL: http://llvm.org/viewvc/llvm-project?rev=294462&view=rev
Log:
[x86] add AVX512vl target for more coverage; NFC

Modified:
    llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll

Modified: llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll?rev=294462&r1=294461&r2=294462&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll Wed Feb  8 09:22:52 2017
@@ -1,7 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx       | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2      | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f  | FileCheck %s --check-prefix=AVX --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx       | FileCheck %s --check-prefix=AVX --check-prefix=AVX12F --check-prefix=AVX12 --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2      | FileCheck %s --check-prefix=AVX --check-prefix=AVX12F --check-prefix=AVX12 --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f   | FileCheck %s --check-prefix=AVX --check-prefix=AVX12F --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl  | FileCheck %s --check-prefix=AVX                       --check-prefix=AVX512 --check-prefix=AVX512VL
 
 ; The condition vector for BLENDV* only cares about the sign bit of each element.
 ; So in these tests, if we generate BLENDV*, we should be able to remove the redundant cmp op.
@@ -23,62 +24,99 @@ define <16 x i8> @signbit_sel_v16i8(<16
 ; Sorry 16-bit, you're not important enough to support?
 
 define <8 x i16> @signbit_sel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) {
-; AVX-LABEL: signbit_sel_v8i16:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vpandn %xmm1, %xmm2, %xmm1
-; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX12F-LABEL: signbit_sel_v8i16:
+; AVX12F:       # BB#0:
+; AVX12F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT:    vpandn %xmm1, %xmm2, %xmm1
+; AVX12F-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX12F-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX12F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v8i16:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT:    vpandnq %xmm1, %xmm2, %xmm1
+; AVX512VL-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <8 x i16> %mask, zeroinitializer
   %z = select <8 x i1> %tr, <8 x i16> %x, <8 x i16> %y
   ret <8 x i16> %z
 }
 
 define <4 x i32> @signbit_sel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
-; AVX-LABEL: signbit_sel_v4i32:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT:    vpcmpgtd %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; AVX12F-LABEL: signbit_sel_v4i32:
+; AVX12F:       # BB#0:
+; AVX12F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT:    vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v4i32:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <4 x i32> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x i32> %x, <4 x i32> %y
   ret <4 x i32> %z
 }
 
 define <2 x i64> @signbit_sel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) {
-; AVX-LABEL: signbit_sel_v2i64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; AVX12F-LABEL: signbit_sel_v2i64:
+; AVX12F:       # BB#0:
+; AVX12F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v2i64:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtq %xmm2, %xmm3, %k1
+; AVX512VL-NEXT:    vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <2 x i64> %mask, zeroinitializer
   %z = select <2 x i1> %tr, <2 x i64> %x, <2 x i64> %y
   ret <2 x i64> %z
 }
 
 define <4 x float> @signbit_sel_v4f32(<4 x float> %x, <4 x float> %y, <4 x i32> %mask) {
-; AVX-LABEL: signbit_sel_v4f32:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT:    vpcmpgtd %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; AVX12F-LABEL: signbit_sel_v4f32:
+; AVX12F:       # BB#0:
+; AVX12F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT:    vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f32:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT:    vblendmps %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <4 x i32> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x float> %x, <4 x float> %y
   ret <4 x float> %z
 }
 
 define <2 x double> @signbit_sel_v2f64(<2 x double> %x, <2 x double> %y, <2 x i64> %mask) {
-; AVX-LABEL: signbit_sel_v2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; AVX12F-LABEL: signbit_sel_v2f64:
+; AVX12F:       # BB#0:
+; AVX12F-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v2f64:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtq %xmm2, %xmm3, %k1
+; AVX512VL-NEXT:    vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <2 x i64> %mask, zeroinitializer
   %z = select <2 x i1> %tr, <2 x double> %x, <2 x double> %y
   ret <2 x double> %z
@@ -106,12 +144,12 @@ define <32 x i8> @signbit_sel_v32i8(<32
 ; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: signbit_sel_v32i8:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpxor %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT:    retq
+; AVX512-LABEL: signbit_sel_v32i8:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; AVX512-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    retq
   %tr = icmp slt <32 x i8> %mask, zeroinitializer
   %z = select <32 x i1> %tr, <32 x i8> %x, <32 x i8> %y
   ret <32 x i8> %z
@@ -149,6 +187,15 @@ define <16 x i16> @signbit_sel_v16i16(<1
 ; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v16i16:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpandnq %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <16 x i16> %mask, zeroinitializer
   %z = select <16 x i1> %tr, <16 x i16> %x, <16 x i16> %y
   ret <16 x i16> %z
@@ -182,6 +229,13 @@ define <8 x i32> @signbit_sel_v8i32(<8 x
 ; AVX512F-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; AVX512F-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
 ; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v8i32:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpcmpgtd %ymm2, %ymm3, %k1
+; AVX512VL-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <8 x i32> %mask, zeroinitializer
   %z = select <8 x i1> %tr, <8 x i32> %x, <8 x i32> %y
   ret <8 x i32> %z
@@ -211,6 +265,13 @@ define <4 x i64> @signbit_sel_v4i64(<4 x
 ; AVX512F-NEXT:    vpcmpgtq %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v4i64:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpcmpgtq %ymm2, %ymm3, %k1
+; AVX512VL-NEXT:    vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <4 x i64> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x i64> %x, <4 x i64> %y
   ret <4 x i64> %z
@@ -240,6 +301,13 @@ define <4 x double> @signbit_sel_v4f64(<
 ; AVX512F-NEXT:    vpcmpgtq %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f64:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpcmpgtq %ymm2, %ymm3, %k1
+; AVX512VL-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <4 x i64> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x double> %x, <4 x double> %y
   ret <4 x double> %z
@@ -274,6 +342,13 @@ define <4 x double> @signbit_sel_v4f64_s
 ; AVX512F-NEXT:    vpmovsxdq %xmm2, %ymm2
 ; AVX512F-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f64_small_mask:
+; AVX512VL:       # BB#0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT:    retq
   %tr = icmp slt <4 x i32> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x double> %x, <4 x double> %y
   ret <4 x double> %z
@@ -306,12 +381,12 @@ define <8 x double> @signbit_sel_v8f64(<
 ; AVX2-NEXT:    vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: signbit_sel_v8f64:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpxord %zmm3, %zmm3, %zmm3
-; AVX512F-NEXT:    vpcmpgtq %zmm2, %zmm3, %k1
-; AVX512F-NEXT:    vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT:    retq
+; AVX512-LABEL: signbit_sel_v8f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpxord %zmm3, %zmm3, %zmm3
+; AVX512-NEXT:    vpcmpgtq %zmm2, %zmm3, %k1
+; AVX512-NEXT:    vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; AVX512-NEXT:    retq
   %tr = icmp slt <8 x i64> %mask, zeroinitializer
   %z = select <8 x i1> %tr, <8 x double> %x, <8 x double> %y
   ret <8 x double> %z




More information about the llvm-commits mailing list