[llvm] r319514 - [X86] Add a DAG combine to simplify masks for AVX2 gather instructions.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 30 18:49:07 PST 2017


Author: ctopper
Date: Thu Nov 30 18:49:07 2017
New Revision: 319514

URL: http://llvm.org/viewvc/llvm-project?rev=319514&view=rev
Log:
[X86] Add a DAG combine to simplify masks for AVX2 gather instructions.

AVX2 gathers only use the upper bit of the mask, allowing us to simplify a sign_extend_inreg to a shift left.
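
For context, a minimal illustration (not part of the patch) of the hardware behavior this relies on: the AVX2 gather instructions consult only the most significant bit of each mask element, so any lane whose sign bit is set is gathered. The function name gather4 below is hypothetical.

    #include <immintrin.h>

    // Illustration only: VPGATHERDD reads just bit 31 of each 32-bit mask
    // lane; the low 31 bits are don't-cares. Scale is the element size (4).
    __m128i gather4(const int *base, __m128i idx, __m128i mask, __m128i passthru) {
      return _mm_mask_i32gather_epi32(passthru, base, idx, mask, 4);
    }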

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=319514&r1=319513&r2=319514&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Nov 30 18:49:07 2017
@@ -35991,6 +35991,21 @@ static SDValue combineGatherScatter(SDNo
     NewOps[2] = Mask.getOperand(0);
     DAG.UpdateNodeOperands(N, NewOps);
   }
+
+  // With AVX2 we only demand the upper bit of the mask.
+  if (!Subtarget.hasAVX512()) {
+    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+                                          !DCI.isBeforeLegalizeOps());
+    KnownBits Known;
+    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
+    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, Known, TLO)) {
+      DCI.AddToWorklist(Mask.getNode());
+      DCI.CommitTargetLoweringOpt(TLO);
+      return SDValue(N, 0);
+    }
+  }
+
   return SDValue();
 }
 
@@ -37097,6 +37112,8 @@ SDValue X86TargetLowering::PerformDAGCom
   case X86ISD::FMSUBADD_RND:
   case X86ISD::FMADDSUB:
   case X86ISD::FMSUBADD:    return combineFMADDSUB(N, DAG, Subtarget);
+  case X86ISD::MGATHER:
+  case X86ISD::MSCATTER:
   case ISD::MGATHER:
   case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI, Subtarget);
   case X86ISD::TESTM:       return combineTestM(N, DAG, Subtarget);
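
A scalar sketch of the demanded-bits reasoning (illustrative only, not from the patch): an i1 mask bit legalizes to a shift left by 31 followed by an arithmetic shift right by 31 (the sign_extend_inreg). When the only consumer reads the sign bit, the right shift changes nothing and can be dropped, which is exactly what demanding APInt::getSignMask lets SimplifyDemandedBits prove.

    #include <cstdint>

    // Assumes two's-complement int32_t (guaranteed since C++20).
    bool lane_enabled_full(uint32_t m) {
      int32_t ext = (int32_t)(m << 31) >> 31;  // sign_extend_inreg: 0 or -1
      return ext < 0;                          // consumer tests the sign bit
    }

    bool lane_enabled_simplified(uint32_t m) {
      return (int32_t)(m << 31) < 0;           // same result; the sra is dead
    }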

Modified: llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll?rev=319514&r1=319513&r2=319514&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll Thu Nov 30 18:49:07 2017
@@ -10,12 +10,10 @@ define <2 x i32> @masked_gather_v2i32(<2
 ; X86-LABEL: masked_gather_v2i32:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
-; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; X86-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-NEXT:    vpgatherqd %xmm0, (,%xmm2), %xmm1
 ; X86-NEXT:    vpmovsxdq %xmm1, %xmm0
 ; X86-NEXT:    retl
@@ -23,11 +21,9 @@ define <2 x i32> @masked_gather_v2i32(<2
 ; X64-LABEL: masked_gather_v2i32:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vpsllq $63, %xmm0, %xmm0
-; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; X64-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT:    vmovdqa (%rdi), %xmm2
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT:    vpgatherqd %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT:    vpmovsxdq %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -64,12 +60,10 @@ define <4 x i32> @masked_gather_v2i32_co
 ; X86-LABEL: masked_gather_v2i32_concat:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
-; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; X86-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-NEXT:    vpgatherqd %xmm0, (,%xmm2), %xmm1
 ; X86-NEXT:    vpmovsxdq %xmm1, %xmm0
 ; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -78,11 +72,9 @@ define <4 x i32> @masked_gather_v2i32_co
 ; X64-LABEL: masked_gather_v2i32_concat:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vpsllq $63, %xmm0, %xmm0
-; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; X64-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT:    vmovdqa (%rdi), %xmm2
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT:    vpgatherqd %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT:    vpmovsxdq %xmm1, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -125,7 +117,6 @@ define <2 x float> @masked_gather_v2floa
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
 ; X86-NEXT:    vpslld $31, %xmm0, %xmm0
-; X86-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; X86-NEXT:    vgatherdps %xmm0, (,%xmm2), %xmm1
@@ -137,7 +128,6 @@ define <2 x float> @masked_gather_v2floa
 ; X64-NEXT:    vmovaps (%rdi), %xmm2
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT:    vpslld $31, %xmm0, %xmm0
-; X64-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X64-NEXT:    vgatherqps %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT:    vmovaps %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -175,7 +165,6 @@ define <4 x float> @masked_gather_v2floa
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
 ; X86-NEXT:    vpslld $31, %xmm0, %xmm0
-; X86-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; X86-NEXT:    vgatherdps %xmm0, (,%xmm2), %xmm1
@@ -187,7 +176,6 @@ define <4 x float> @masked_gather_v2floa
 ; X64-NEXT:    vmovaps (%rdi), %xmm2
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT:    vpslld $31, %xmm0, %xmm0
-; X64-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X64-NEXT:    vgatherqps %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT:    vmovaps %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -228,7 +216,6 @@ define <4 x i32> @masked_gather_v4i32(<4
 ; X86-LABEL: masked_gather_v4i32:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpslld $31, %xmm1, %xmm1
-; X86-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X86-NEXT:    vpgatherdd %xmm1, (,%xmm0), %xmm2
 ; X86-NEXT:    vmovdqa %xmm2, %xmm0
 ; X86-NEXT:    retl
@@ -236,7 +223,6 @@ define <4 x i32> @masked_gather_v4i32(<4
 ; X64-LABEL: masked_gather_v4i32:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vpslld $31, %xmm1, %xmm1
-; X64-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X64-NEXT:    vpgatherqd %xmm1, (,%ymm0), %xmm2
 ; X64-NEXT:    vmovdqa %xmm2, %xmm0
 ; X64-NEXT:    vzeroupper
@@ -290,7 +276,6 @@ define <4 x float> @masked_gather_v4floa
 ; X86-LABEL: masked_gather_v4float:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpslld $31, %xmm1, %xmm1
-; X86-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X86-NEXT:    vgatherdps %xmm1, (,%xmm0), %xmm2
 ; X86-NEXT:    vmovaps %xmm2, %xmm0
 ; X86-NEXT:    retl
@@ -298,7 +283,6 @@ define <4 x float> @masked_gather_v4floa
 ; X64-LABEL: masked_gather_v4float:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vpslld $31, %xmm1, %xmm1
-; X64-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X64-NEXT:    vgatherqps %xmm1, (,%ymm0), %xmm2
 ; X64-NEXT:    vmovaps %xmm2, %xmm0
 ; X64-NEXT:    vzeroupper
@@ -353,7 +337,6 @@ define <8 x i32> @masked_gather_v8i32(<8
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; X86-NEXT:    vpslld $31, %ymm0, %ymm0
-; X86-NEXT:    vpsrad $31, %ymm0, %ymm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovdqa (%eax), %ymm2
 ; X86-NEXT:    vpgatherdd %ymm0, (,%ymm2), %ymm1
@@ -470,7 +453,6 @@ define <8 x float> @masked_gather_v8floa
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; X86-NEXT:    vpslld $31, %ymm0, %ymm0
-; X86-NEXT:    vpsrad $31, %ymm0, %ymm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovaps (%eax), %ymm2
 ; X86-NEXT:    vgatherdps %ymm0, (,%ymm2), %ymm1
@@ -587,7 +569,6 @@ define <4 x i64> @masked_gather_v4i64(<4
 ; X86-LABEL: masked_gather_v4i64:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpslld $31, %xmm0, %xmm0
-; X86-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X86-NEXT:    vpmovsxdq %xmm0, %ymm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovdqa (%eax), %xmm2
@@ -598,7 +579,6 @@ define <4 x i64> @masked_gather_v4i64(<4
 ; X64-LABEL: masked_gather_v4i64:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vpslld $31, %xmm0, %xmm0
-; X64-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X64-NEXT:    vpmovsxdq %xmm0, %ymm0
 ; X64-NEXT:    vmovdqa (%rdi), %ymm2
 ; X64-NEXT:    vpgatherqq %ymm0, (,%ymm2), %ymm1
@@ -664,7 +644,6 @@ define <4 x double> @masked_gather_v4dou
 ; X86-LABEL: masked_gather_v4double:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    vpslld $31, %xmm0, %xmm0
-; X86-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X86-NEXT:    vpmovsxdq %xmm0, %ymm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovapd (%eax), %xmm2
@@ -675,7 +654,6 @@ define <4 x double> @masked_gather_v4dou
 ; X64-LABEL: masked_gather_v4double:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vpslld $31, %xmm0, %xmm0
-; X64-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X64-NEXT:    vpmovsxdq %xmm0, %ymm0
 ; X64-NEXT:    vmovapd (%rdi), %ymm2
 ; X64-NEXT:    vgatherqpd %ymm0, (,%ymm2), %ymm1
@@ -740,20 +718,16 @@ declare <2 x i64> @llvm.masked.gather.v2
 define <2 x i64> @masked_gather_v2i64(<2 x i64*>* %ptr, <2 x i1> %masks, <2 x i64> %passthro) {
 ; X86-LABEL: masked_gather_v2i64:
 ; X86:       # BB#0: # %entry
-; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
-; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
-; X86-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
-; X86-NEXT:    vpgatherqq %xmm0, (,%xmm3), %xmm1
+; X86-NEXT:    vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
+; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X86-NEXT:    vpgatherqq %xmm0, (,%xmm2), %xmm1
 ; X86-NEXT:    vmovdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: masked_gather_v2i64:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vpsllq $63, %xmm0, %xmm0
-; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; X64-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT:    vmovdqa (%rdi), %xmm2
 ; X64-NEXT:    vpgatherqq %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT:    vmovdqa %xmm1, %xmm0
@@ -791,20 +765,16 @@ declare <2 x double> @llvm.masked.gather
 define <2 x double> @masked_gather_v2double(<2 x double*>* %ptr, <2 x i1> %masks, <2 x double> %passthro) {
 ; X86-LABEL: masked_gather_v2double:
 ; X86:       # BB#0: # %entry
-; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
-; X86-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
-; X86-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
-; X86-NEXT:    vgatherqpd %xmm0, (,%xmm3), %xmm1
+; X86-NEXT:    vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
+; X86-NEXT:    vpsllq $63, %xmm0, %xmm0
+; X86-NEXT:    vgatherqpd %xmm0, (,%xmm2), %xmm1
 ; X86-NEXT:    vmovapd %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: masked_gather_v2double:
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    vpsllq $63, %xmm0, %xmm0
-; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; X64-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT:    vmovapd (%rdi), %xmm2
 ; X64-NEXT:    vgatherqpd %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT:    vmovapd %xmm1, %xmm0
