[llvm] r319405 - [X86] Make sure we don't remove sign extends of masks with AVX2 masked gathers.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 29 22:31:31 PST 2017


Author: ctopper
Date: Wed Nov 29 22:31:31 2017
New Revision: 319405

URL: http://llvm.org/viewvc/llvm-project?rev=319405&view=rev
Log:
[X86] Make sure we don't remove sign extends of masks with AVX2 masked gathers.

With AVX2 we don't use k-registers for the mask; the gather instructions test the most significant bit of each mask element instead, so we need to keep the sign extension that propagates the mask bit up to the MSB.
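
For reference, a minimal IR reproducer (names here are illustrative, not taken from the
test file) that reaches this combine when compiled with something like
llc -mtriple=x86_64-unknown-unknown -mattr=+avx2 is:

  define <4 x i32> @gather_example(<4 x i32*> %ptrs, <4 x i1> %mask, <4 x i32> %passthru) {
    %res = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> %mask, <4 x i32> %passthru)
    ret <4 x i32> %res
  }
  declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)

During legalization the v4i1 mask is widened and ends up behind a SIGN_EXTEND_INREG. With
AVX-512 that extension is redundant because the mask is truncated back to a v*i1 k-register
operand anyway, but with plain AVX2 it is exactly what puts each mask bit into the element
MSB that vpgatherdd tests, so the combine below now only strips it when
Subtarget.hasAVX512() is true.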

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=319405&r1=319404&r2=319405&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Nov 29 22:31:31 2017
@@ -35924,7 +35924,8 @@ static SDValue combineSetCC(SDNode *N, S
 }
 
 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
-                                    TargetLowering::DAGCombinerInfo &DCI) {
+                                    TargetLowering::DAGCombinerInfo &DCI,
+                                    const X86Subtarget &Subtarget) {
   SDLoc DL(N);
 
   // Pre-shrink oversized index elements to avoid triggering scalarization.
@@ -35967,7 +35968,7 @@ static SDValue combineGatherScatter(SDNo
   // the masks is v*i1. So the mask will be truncated anyway.
   // The SIGN_EXTEND_INREG my be dropped.
   SDValue Mask = N->getOperand(2);
-  if (Mask.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+  if (Subtarget.hasAVX512() && Mask.getOpcode() == ISD::SIGN_EXTEND_INREG) {
     SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
     NewOps[2] = Mask.getOperand(0);
     DAG.UpdateNodeOperands(N, NewOps);
@@ -37079,7 +37080,7 @@ SDValue X86TargetLowering::PerformDAGCom
   case X86ISD::FMADDSUB:
   case X86ISD::FMSUBADD:    return combineFMADDSUB(N, DAG, Subtarget);
   case ISD::MGATHER:
-  case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI);
+  case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI, Subtarget);
   case X86ISD::TESTM:       return combineTestM(N, DAG, Subtarget);
   case X86ISD::PCMPEQ:
   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);

Modified: llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll?rev=319405&r1=319404&r2=319405&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll Wed Nov 29 22:31:31 2017
@@ -9,6 +9,9 @@ declare <2 x i32> @llvm.masked.gather.v2
 define <2 x i32> @masked_gather_v2i32(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i32> %passthro) {
 ; X86-LABEL: masked_gather_v2i32:
 ; X86:       # BB#0: # %entry
+; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
 ; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -19,6 +22,9 @@ define <2 x i32> @masked_gather_v2i32(<2
 ;
 ; X64-LABEL: masked_gather_v2i32:
 ; X64:       # BB#0: # %entry
+; X64-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT:    vmovdqa (%rdi), %xmm2
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -57,6 +63,9 @@ entry:
 define <4 x i32> @masked_gather_v2i32_concat(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i32> %passthro) {
 ; X86-LABEL: masked_gather_v2i32_concat:
 ; X86:       # BB#0: # %entry
+; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
 ; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -68,6 +77,9 @@ define <4 x i32> @masked_gather_v2i32_co
 ;
 ; X64-LABEL: masked_gather_v2i32_concat:
 ; X64:       # BB#0: # %entry
+; X64-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT:    vmovdqa (%rdi), %xmm2
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -112,6 +124,8 @@ define <2 x float> @masked_gather_v2floa
 ; X86-LABEL: masked_gather_v2float:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; X86-NEXT:    vpslld $31, %xmm0, %xmm0
+; X86-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; X86-NEXT:    vgatherdps %xmm0, (,%xmm2), %xmm1
@@ -121,6 +135,8 @@ define <2 x float> @masked_gather_v2floa
 ; X64-LABEL: masked_gather_v2float:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; X64-NEXT:    vpslld $31, %xmm0, %xmm0
+; X64-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X64-NEXT:    vmovaps (%rdi), %xmm2
 ; X64-NEXT:    vgatherqps %xmm0, (,%ymm2), %xmm1
 ; X64-NEXT:    vmovaps %xmm1, %xmm0
@@ -159,6 +175,8 @@ define <4 x float> @masked_gather_v2floa
 ; X86-LABEL: masked_gather_v2float_concat:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; X86-NEXT:    vpslld $31, %xmm0, %xmm0
+; X86-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; X86-NEXT:    vgatherdps %xmm0, (,%xmm2), %xmm1
@@ -168,6 +186,8 @@ define <4 x float> @masked_gather_v2floa
 ; X64-LABEL: masked_gather_v2float_concat:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; X64-NEXT:    vpslld $31, %xmm0, %xmm0
+; X64-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X64-NEXT:    vmovaps (%rdi), %xmm2
 ; X64-NEXT:    vgatherqps %xmm0, (,%ymm2), %xmm1
 ; X64-NEXT:    vmovaps %xmm1, %xmm0
@@ -209,12 +229,16 @@ declare <4 x i32> @llvm.masked.gather.v4
 define <4 x i32> @masked_gather_v4i32(<4 x i32*> %ptrs, <4 x i1> %masks, <4 x i32> %passthro) {
 ; X86-LABEL: masked_gather_v4i32:
 ; X86:       # BB#0: # %entry
+; X86-NEXT:    vpslld $31, %xmm1, %xmm1
+; X86-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X86-NEXT:    vpgatherdd %xmm1, (,%xmm0), %xmm2
 ; X86-NEXT:    vmovdqa %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: masked_gather_v4i32:
 ; X64:       # BB#0: # %entry
+; X64-NEXT:    vpslld $31, %xmm1, %xmm1
+; X64-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X64-NEXT:    vpgatherqd %xmm1, (,%ymm0), %xmm2
 ; X64-NEXT:    vmovdqa %xmm2, %xmm0
 ; X64-NEXT:    vzeroupper
@@ -267,12 +291,16 @@ declare <4 x float> @llvm.masked.gather.
 define <4 x float> @masked_gather_v4float(<4 x float*> %ptrs, <4 x i1> %masks, <4 x float> %passthro) {
 ; X86-LABEL: masked_gather_v4float:
 ; X86:       # BB#0: # %entry
+; X86-NEXT:    vpslld $31, %xmm1, %xmm1
+; X86-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X86-NEXT:    vgatherdps %xmm1, (,%xmm0), %xmm2
 ; X86-NEXT:    vmovaps %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: masked_gather_v4float:
 ; X64:       # BB#0: # %entry
+; X64-NEXT:    vpslld $31, %xmm1, %xmm1
+; X64-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X64-NEXT:    vgatherqps %xmm1, (,%ymm0), %xmm2
 ; X64-NEXT:    vmovaps %xmm2, %xmm0
 ; X64-NEXT:    vzeroupper
@@ -326,6 +354,8 @@ define <8 x i32> @masked_gather_v8i32(<8
 ; X86-LABEL: masked_gather_v8i32:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-NEXT:    vpslld $31, %ymm0, %ymm0
+; X86-NEXT:    vpsrad $31, %ymm0, %ymm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovdqa (%eax), %ymm2
 ; X86-NEXT:    vpgatherdd %ymm0, (,%ymm2), %ymm1
@@ -441,6 +471,8 @@ define <8 x float> @masked_gather_v8floa
 ; X86-LABEL: masked_gather_v8float:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-NEXT:    vpslld $31, %ymm0, %ymm0
+; X86-NEXT:    vpsrad $31, %ymm0, %ymm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovaps (%eax), %ymm2
 ; X86-NEXT:    vgatherdps %ymm0, (,%ymm2), %ymm1
@@ -710,14 +742,20 @@ declare <2 x i64> @llvm.masked.gather.v2
 define <2 x i64> @masked_gather_v2i64(<2 x i64*>* %ptr, <2 x i1> %masks, <2 x i64> %passthro) {
 ; X86-LABEL: masked_gather_v2i64:
 ; X86:       # BB#0: # %entry
+; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X86-NEXT:    vpgatherqq %xmm0, (,%xmm2), %xmm1
+; X86-NEXT:    vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
+; X86-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vpgatherqq %xmm0, (,%xmm3), %xmm1
 ; X86-NEXT:    vmovdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: masked_gather_v2i64:
 ; X64:       # BB#0: # %entry
+; X64-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT:    vmovdqa (%rdi), %xmm2
 ; X64-NEXT:    vpgatherqq %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT:    vmovdqa %xmm1, %xmm0
@@ -755,14 +793,20 @@ declare <2 x double> @llvm.masked.gather
 define <2 x double> @masked_gather_v2double(<2 x double*>* %ptr, <2 x i1> %masks, <2 x double> %passthro) {
 ; X86-LABEL: masked_gather_v2double:
 ; X86:       # BB#0: # %entry
+; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X86-NEXT:    vgatherqpd %xmm0, (,%xmm2), %xmm1
+; X86-NEXT:    vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
+; X86-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vgatherqpd %xmm0, (,%xmm3), %xmm1
 ; X86-NEXT:    vmovapd %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: masked_gather_v2double:
 ; X64:       # BB#0: # %entry
+; X64-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT:    vmovapd (%rdi), %xmm2
 ; X64-NEXT:    vgatherqpd %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT:    vmovapd %xmm1, %xmm0
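
A note on the new CHECK lines above: the vpslld $31 / vpsrad $31 pairs (and, for 64-bit
elements, vpsllq $63 followed by a vpcmpgtq against zero, since AVX2 has no vpsraq) are how
the preserved sign extension is lowered. Each sequence moves the mask bit of a lane into
the sign bit and then broadcasts it across the lane, i.e. it materializes

  %wide = sext <4 x i1> %mask to <4 x i32>   ; each lane becomes all-ones or all-zeros

so the AVX2 gather instructions, which only inspect the MSB of each mask element, see the
intended per-lane mask.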



