[llvm] r315172 - [X86] Stop LowerSIGN_EXTEND_AVX512 from creating v8i16/v16i16/v16i8 vselects with a v8i1/v16i1 condition when BWI is not available.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Oct 8 01:50:59 PDT 2017
Author: ctopper
Date: Sun Oct 8 01:50:59 2017
New Revision: 315172
URL: http://llvm.org/viewvc/llvm-project?rev=315172&view=rev
Log:
[X86] Stop LowerSIGN_EXTEND_AVX512 from creating v8i16/v16i16/v16i8 vselects with a v8i1/v16i1 condition when BWI is not available.
Some of the tests in vector-shuffle-v1.ll would get into an infinite loop without this.
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=315172&r1=315171&r2=315172&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Oct 8 01:50:59 2017
@@ -18028,8 +18028,13 @@ static SDValue LowerSIGN_EXTEND_AVX512(S
return SDValue();
MVT ExtVT = VT;
- if (!VT.is512BitVector() && !Subtarget.hasVLX())
+ if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
+ } else if (!Subtarget.hasBWI() && (VTElt == MVT::i16 || VTElt == MVT::i8)) {
+ // If we don't have BWI support we need to extend 8/16-bit to 32-bit.
+ // Otherwise we end up with vselects we can't handle.
+ ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
+ }
SDValue V;
if (Subtarget.hasDQI()) {
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll?rev=315172&r1=315171&r2=315172&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll Sun Oct 8 01:50:59 2017
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw -mattr=+avx512vl -mattr=+avx512dq| FileCheck %s --check-prefix=VL_BW_DQ
define <2 x i1> @shuf2i1_1_0(<2 x i1> %a) {
@@ -8,6 +9,18 @@ define <2 x i1> @shuf2i1_1_0(<2 x i1> %a
; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf2i1_1_0:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpsllq $63, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmq %xmm1, %xmm1, %k1
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf2i1_1_0:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0
@@ -29,6 +42,21 @@ define <2 x i1> @shuf2i1_1_2(<2 x i1> %a
; AVX512F-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf2i1_1_2:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: movb $1, %al
+; AVX512VL-NEXT: kmovw %eax, %k1
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm2 {%k1} {z}
+; AVX512VL-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
+; AVX512VL-NEXT: vpsllq $63, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmq %xmm1, %xmm1, %k1
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf2i1_1_2:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0
@@ -52,6 +80,18 @@ define <4 x i1> @shuf4i1_3_2_10(<4 x i1>
; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf4i1_3_2_10:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,2,1,0]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf4i1_3_2_10:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: vpslld $31, %xmm0, %xmm0
@@ -79,6 +119,20 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
+; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
+; AVX512VL-NEXT: vpermq %zmm0, %zmm1, %zmm0
+; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
@@ -111,6 +165,21 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %zmm3, %zmm1, %k2
+; AVX512VL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; AVX512VL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512VL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
+; AVX512VL-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; AVX512VL-NEXT: vpslld $31, %zmm2, %zmm0
+; AVX512VL-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
@@ -139,6 +208,15 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7
; AVX512F-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[3,6,u,12,3,7,7,0,3,6,1,13,3,u,7,0,u,u,22,u,u,u,u,u,u,u,u,u,u,21,u,u]
+; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,6,u,u,u,u,u,u,u,u,u,u,5,u,u,19,22,u,28,19,23,23,16,19,22,17,29,19,u,23,16]
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,0,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255,0,0,0,0,0,0,0,0,0,0,255,0,0]
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: vpsllw $7, %ymm0, %ymm0
@@ -167,6 +245,20 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: kmovw %edi, %k1
+; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512VL-NEXT: vpbroadcastq %xmm0, %zmm0
+; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
@@ -197,6 +289,20 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: kmovw %edi, %k1
+; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovdqa64 {{.*#+}} zmm2 = <8,2,10,u,3,u,2,u>
+; AVX512VL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512VL-NEXT: kmovw %k0, %eax
+; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
@@ -228,6 +334,18 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: kmovw %edi, %k1
+; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
+; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512VL-NEXT: kmovw %k0, %eax
+; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
@@ -259,6 +377,20 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: kmovw %edi, %k1
+; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,6,1,0,3,7,7,0]
+; AVX512VL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512VL-NEXT: kmovw %k0, %eax
+; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
@@ -292,6 +424,20 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: kmovw %edi, %k1
+; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
+; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512VL-NEXT: kmovw %k0, %eax
+; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
@@ -327,6 +473,22 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: kmovw %edi, %k1
+; AVX512VL-NEXT: movb $51, %al
+; AVX512VL-NEXT: kmovw %eax, %k2
+; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; AVX512VL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512VL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [9,6,1,0,3,7,7,1]
+; AVX512VL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512VL-NEXT: kmovw %k0, %eax
+; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
@@ -364,6 +526,22 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf8i1_9_6_1_10_3_7_7_0_all_ones:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpmovsxwq %xmm0, %zmm0
+; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,3,4,5,6,7]
+; AVX512VL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512VL-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
+; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512VL-NEXT: kmovw %k0, %eax
+; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf8i1_9_6_1_10_3_7_7_0_all_ones:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: vpsllw $15, %xmm0, %xmm0
@@ -396,6 +574,18 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: kmovw %edi, %k1
+; AVX512VL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vpbroadcastd %xmm0, %zmm0
+; AVX512VL-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512VL-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512VL-NEXT: kmovw %k0, %eax
+; AVX512VL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
@@ -448,6 +638,41 @@ define i64 @shuf64i1_zero(i64 %a) {
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
+; AVX512VL-LABEL: shuf64i1_zero:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: pushq %rbp
+; AVX512VL-NEXT: .Lcfi0:
+; AVX512VL-NEXT: .cfi_def_cfa_offset 16
+; AVX512VL-NEXT: .Lcfi1:
+; AVX512VL-NEXT: .cfi_offset %rbp, -16
+; AVX512VL-NEXT: movq %rsp, %rbp
+; AVX512VL-NEXT: .Lcfi2:
+; AVX512VL-NEXT: .cfi_def_cfa_register %rbp
+; AVX512VL-NEXT: andq $-32, %rsp
+; AVX512VL-NEXT: subq $96, %rsp
+; AVX512VL-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; AVX512VL-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; AVX512VL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vpbroadcastb %xmm0, %ymm0
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512VL-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512VL-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512VL-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512VL-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512VL-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512VL-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512VL-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512VL-NEXT: kmovw %k0, (%rsp)
+; AVX512VL-NEXT: movl (%rsp), %ecx
+; AVX512VL-NEXT: movq %rcx, %rax
+; AVX512VL-NEXT: shlq $32, %rax
+; AVX512VL-NEXT: orq %rcx, %rax
+; AVX512VL-NEXT: movq %rbp, %rsp
+; AVX512VL-NEXT: popq %rbp
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; VL_BW_DQ-LABEL: shuf64i1_zero:
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: kmovq %rdi, %k0
More information about the llvm-commits
mailing list