[llvm] r370404 - [X86][SSE] combinePMULDQ - pmuldq(x, 0) -> zero vector (PR43159)
Hans Wennborg via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 30 02:00:58 PDT 2019
Merged to release_90 in r370445.
On Thu, Aug 29, 2019 at 10:20 PM Simon Pilgrim via llvm-commits
<llvm-commits at lists.llvm.org> wrote:
>
> Author: rksimon
> Date: Thu Aug 29 13:22:08 2019
> New Revision: 370404
>
> URL: http://llvm.org/viewvc/llvm-project?rev=370404&view=rev
> Log:
> [X86][SSE] combinePMULDQ - pmuldq(x, 0) -> zero vector (PR43159)
>
> ISD::isBuildVectorAllZeros permits undef elements to be present, which means the matched operand cannot simply be returned as the zero result. PMULDQ/PMULUDQ is an extending multiply, so a multiply by zero in the lower 32 bits should still produce a zero 64-bit element.
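>
> To illustrate the semantics, here is a minimal standalone C++ model of a
> single PMULUDQ lane (an editorial sketch with made-up lane values, not code
> from this patch):
>
> #include <cassert>
> #include <cstdint>
>
> // Scalar model of one PMULUDQ lane: the low 32 bits of each 64-bit source
> // lane are zero-extended and multiplied into a full 64-bit result.
> static uint64_t pmuludq_lane(uint64_t a, uint64_t b) {
>   return uint64_t(uint32_t(a)) * uint64_t(uint32_t(b));
> }
>
> int main() {
>   // Treat the high 32 bits as "undef" garbage: a zero low half still
>   // forces the entire 64-bit lane to zero. Hence the combine may fold to
>   // an explicit zero vector, but must not forward an operand matched by
>   // isBuildVectorAllZeros, whose undef lanes would leak into the result.
>   assert(pmuludq_lane(0xDEADBEEF0000002AULL, 0xFFFFFFFF00000000ULL) == 0);
>   return 0;
> }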
>
> Modified:
> llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
> llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll
>
> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=370404&r1=370403&r2=370404&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Aug 29 13:22:08 2019
> @@ -44660,7 +44660,8 @@ static SDValue combineScalarToVector(SDN
>
>  // Simplify PMULDQ and PMULUDQ operations.
>  static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
> -                             TargetLowering::DAGCombinerInfo &DCI) {
> +                             TargetLowering::DAGCombinerInfo &DCI,
> +                             const X86Subtarget &Subtarget) {
>    SDValue LHS = N->getOperand(0);
>    SDValue RHS = N->getOperand(1);
>
> @@ -44670,8 +44671,9 @@ static SDValue combinePMULDQ(SDNode *N,
>      return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
>
>    // Multiply by zero.
> +  // Don't return RHS as it may contain UNDEFs.
>    if (ISD::isBuildVectorAllZeros(RHS.getNode()))
> -    return RHS;
> +    return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));
>
>    // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
>    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
> @@ -44896,7 +44898,7 @@ SDValue X86TargetLowering::PerformDAGCom
>    case X86ISD::PCMPEQ:
>    case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
>    case X86ISD::PMULDQ:
> -  case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI);
> +  case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
>    }
>
>    return SDValue();
>
> Modified: llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll?rev=370404&r1=370403&r2=370404&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll Thu Aug 29 13:22:08 2019
> @@ -172,3 +172,118 @@ bb34:
>    %tmp35 = add <4 x i64> %tmp29, %tmp28
>    ret void
>  }
> +
> +define i32 @PR43159(<4 x i32>* %a0) {
> +; SSE-LABEL: PR43159:
> +; SSE: # %bb.0: # %entry
> +; SSE-NEXT: movdqa (%rdi), %xmm0
> +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
> +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
> +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
> +; SSE-NEXT: pmuludq %xmm2, %xmm3
> +; SSE-NEXT: movdqa %xmm0, %xmm2
> +; SSE-NEXT: psrld $1, %xmm2
> +; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
> +; SSE-NEXT: pmuludq %xmm1, %xmm2
> +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
> +; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
> +; SSE-NEXT: psubd %xmm1, %xmm0
> +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
> +; SSE-NEXT: pmuludq {{.*}}(%rip), %xmm0
> +; SSE-NEXT: pxor %xmm2, %xmm2
> +; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
> +; SSE-NEXT: paddd %xmm1, %xmm2
> +; SSE-NEXT: movdqa %xmm2, %xmm0
> +; SSE-NEXT: psrld $7, %xmm0
> +; SSE-NEXT: psrld $6, %xmm2
> +; SSE-NEXT: movd %xmm2, %edi
> +; SSE-NEXT: pextrd $1, %xmm0, %esi
> +; SSE-NEXT: pextrd $2, %xmm2, %edx
> +; SSE-NEXT: pextrd $3, %xmm0, %ecx
> +; SSE-NEXT: jmp foo # TAILCALL
> +;
> +; AVX2-LABEL: PR43159:
> +; AVX2: # %bb.0: # %entry
> +; AVX2-NEXT: vmovdqa (%rdi), %xmm0
> +; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
> +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
> +; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
> +; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
> +; AVX2-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
> +; AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
> +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
> +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
> +; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
> +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
> +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
> +; AVX2-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
> +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
> +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
> +; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
> +; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
> +; AVX2-NEXT: vmovd %xmm0, %edi
> +; AVX2-NEXT: vpextrd $1, %xmm0, %esi
> +; AVX2-NEXT: vpextrd $2, %xmm0, %edx
> +; AVX2-NEXT: vpextrd $3, %xmm0, %ecx
> +; AVX2-NEXT: jmp foo # TAILCALL
> +;
> +; AVX512VL-LABEL: PR43159:
> +; AVX512VL: # %bb.0: # %entry
> +; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
> +; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
> +; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
> +; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
> +; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
> +; AVX512VL-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
> +; AVX512VL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
> +; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
> +; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
> +; AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
> +; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
> +; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
> +; AVX512VL-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
> +; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
> +; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
> +; AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
> +; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
> +; AVX512VL-NEXT: vmovd %xmm0, %edi
> +; AVX512VL-NEXT: vpextrd $1, %xmm0, %esi
> +; AVX512VL-NEXT: vpextrd $2, %xmm0, %edx
> +; AVX512VL-NEXT: vpextrd $3, %xmm0, %ecx
> +; AVX512VL-NEXT: jmp foo # TAILCALL
> +;
> +; AVX512DQVL-LABEL: PR43159:
> +; AVX512DQVL: # %bb.0: # %entry
> +; AVX512DQVL-NEXT: vmovdqa (%rdi), %xmm0
> +; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
> +; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
> +; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
> +; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
> +; AVX512DQVL-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
> +; AVX512DQVL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
> +; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
> +; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
> +; AVX512DQVL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
> +; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
> +; AVX512DQVL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
> +; AVX512DQVL-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
> +; AVX512DQVL-NEXT: vpxor %xmm2, %xmm2, %xmm2
> +; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
> +; AVX512DQVL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
> +; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
> +; AVX512DQVL-NEXT: vmovd %xmm0, %edi
> +; AVX512DQVL-NEXT: vpextrd $1, %xmm0, %esi
> +; AVX512DQVL-NEXT: vpextrd $2, %xmm0, %edx
> +; AVX512DQVL-NEXT: vpextrd $3, %xmm0, %ecx
> +; AVX512DQVL-NEXT: jmp foo # TAILCALL
> +entry:
> +  %0 = load <4 x i32>, <4 x i32>* %a0, align 16
> +  %div = udiv <4 x i32> %0, <i32 167, i32 237, i32 254, i32 177>
> +  %ext0 = extractelement <4 x i32> %div, i32 0
> +  %ext1 = extractelement <4 x i32> %div, i32 1
> +  %ext2 = extractelement <4 x i32> %div, i32 2
> +  %ext3 = extractelement <4 x i32> %div, i32 3
> +  %call = tail call i32 @foo(i32 %ext0, i32 %ext1, i32 %ext2, i32 %ext3)
> +  ret i32 %call
> +}
> +declare dso_local i32 @foo(i32, i32, i32, i32)
>