[llvm-branch-commits] [llvm-branch] r370445 - Merging r370404:
Author: hans
Date: Fri Aug 30 02:02:10 2019
New Revision: 370445
URL: http://llvm.org/viewvc/llvm-project?rev=370445&view=rev
Log:
Merging r370404:
------------------------------------------------------------------------
r370404 | rksimon | 2019-08-29 22:22:08 +0200 (Thu, 29 Aug 2019) | 3 lines
[X86][SSE] combinePMULDQ - pmuldq(x, 0) -> zero vector (PR43159)
ISD::isBuildVectorAllZeros permits undef elements to be present, which means we can't return the matched operand as the zero vector. PMULDQ/PMULUDQ are extending multiplies, so a multiply by zero of the lower 32 bits should result in a zero 64-bit element.
------------------------------------------------------------------------
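To see why the old fold was unsound, here is a minimal scalar model of one PMULUDQ lane (a hedged sketch for illustration; pmuludqLane is a made-up helper, not an LLVM API). isBuildVectorAllZeros also matches vectors such as <0, undef>, and an undef lane may later be assumed to hold any value, so the combine must materialize real zeros instead of returning the matched operand:

#include <cassert>
#include <cstdint>

// Scalar model of one PMULUDQ lane: multiply the low 32 bits of each
// 64-bit element and produce the full 64-bit product.
static uint64_t pmuludqLane(uint64_t A, uint64_t B) {
  return uint64_t(uint32_t(A)) * uint64_t(uint32_t(B));
}

int main() {
  // A genuinely zero lane in the "all zeros" operand gives a zero product.
  assert(pmuludqLane(0x12345678u, 0) == 0);
  // An undef lane may later be assumed to hold *any* value:
  uint64_t UndefLane = 0xDEADBEEFull; // arbitrary stand-in for undef
  assert(pmuludqLane(0x12345678u, UndefLane) != 0);
  // So returning the operand (undef lanes included) instead of a fresh
  // zero vector could let a nonzero value escape where 0 was required.
  return 0;
}
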
Modified:
llvm/branches/release_90/ (props changed)
llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp
llvm/branches/release_90/test/CodeGen/X86/combine-pmuldq.ll
Propchange: llvm/branches/release_90/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Aug 30 02:02:10 2019
@@ -1,3 +1,3 @@
/llvm/branches/Apple/Pertwee:110850,110961
/llvm/branches/type-system-rewrite:133420-134817
-/llvm/trunk:155241,366431,366447,366481,366487,366527,366570,366660,366868,366925,367019,367030,367062,367084,367124,367215,367292,367304,367306,367314,367340-367341,367394,367396,367398,367403,367412,367417,367429,367580,367662,367750,367753,367846-367847,367898,367941,368004,368164,368230,368300,368315,368324,368477-368478,368517-368519,368554,368572,368873,369011,369026,369084,369095,369097,369168,369199,369426,369443,369886,370036,370176,370204,370271
+/llvm/trunk:155241,366431,366447,366481,366487,366527,366570,366660,366868,366925,367019,367030,367062,367084,367124,367215,367292,367304,367306,367314,367340-367341,367394,367396,367398,367403,367412,367417,367429,367580,367662,367750,367753,367846-367847,367898,367941,368004,368164,368230,368300,368315,368324,368477-368478,368517-368519,368554,368572,368873,369011,369026,369084,369095,369097,369168,369199,369426,369443,369886,370036,370176,370204,370271,370404
Modified: llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp?rev=370445&r1=370444&r2=370445&view=diff
==============================================================================
--- llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp Fri Aug 30 02:02:10 2019
@@ -44104,7 +44104,8 @@ static SDValue combineScalarToVector(SDN
// Simplify PMULDQ and PMULUDQ operations.
static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -44114,8 +44115,9 @@ static SDValue combinePMULDQ(SDNode *N,
return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
// Multiply by zero.
+ // Don't return RHS as it may contain UNDEFs.
if (ISD::isBuildVectorAllZeros(RHS.getNode()))
- return RHS;
+ return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));
// Aggressively peek through ops to get at the demanded low bits.
APInt DemandedMask = APInt::getLowBitsSet(64, 32);
@@ -44323,7 +44325,7 @@ SDValue X86TargetLowering::PerformDAGCom
case X86ISD::PCMPEQ:
case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
case X86ISD::PMULDQ:
- case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI);
+ case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
}
return SDValue();
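
The "aggressively peek through ops" step in the hunk above rests on a demanded-bits fact: only bits 0..31 of each operand lane feed the multiply. A minimal sketch of that fact (illustrative scalar code, not LLVM API):

#include <cassert>
#include <cstdint>

// Only the low 32 bits of each PMULUDQ operand lane are demanded:
// two operands that agree on bits 0..31 give identical products.
static uint64_t lowMul(uint64_t A, uint64_t B) {
  return uint64_t(uint32_t(A)) * uint64_t(uint32_t(B));
}

int main() {
  uint64_t A = 0x00000000CAFEBABEull;
  uint64_t B = 0x1122334400000000ull | uint32_t(A); // junk high bits
  assert(lowMul(A, 7) == lowMul(B, 7)); // high bits don't matter
  return 0;
}
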
Modified: llvm/branches/release_90/test/CodeGen/X86/combine-pmuldq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_90/test/CodeGen/X86/combine-pmuldq.ll?rev=370445&r1=370444&r2=370445&view=diff
==============================================================================
--- llvm/branches/release_90/test/CodeGen/X86/combine-pmuldq.ll (original)
+++ llvm/branches/release_90/test/CodeGen/X86/combine-pmuldq.ll Fri Aug 30 02:02:10 2019
@@ -172,3 +172,118 @@ bb34:
%tmp35 = add <4 x i64> %tmp29, %tmp28
ret void
}
+
+define i32 @PR43159(<4 x i32>* %a0) {
+; SSE-LABEL: PR43159:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movdqa (%rdi), %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE-NEXT: psubd %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT: pmuludq {{.*}}(%rip), %xmm0
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: psrld $7, %xmm0
+; SSE-NEXT: psrld $6, %xmm2
+; SSE-NEXT: movd %xmm2, %edi
+; SSE-NEXT: pextrd $1, %xmm0, %esi
+; SSE-NEXT: pextrd $2, %xmm2, %edx
+; SSE-NEXT: pextrd $3, %xmm0, %ecx
+; SSE-NEXT: jmp foo # TAILCALL
+;
+; AVX2-LABEL: PR43159:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vmovdqa (%rdi), %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
+; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX2-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
+; AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX2-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %edi
+; AVX2-NEXT: vpextrd $1, %xmm0, %esi
+; AVX2-NEXT: vpextrd $2, %xmm0, %edx
+; AVX2-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX2-NEXT: jmp foo # TAILCALL
+;
+; AVX512VL-LABEL: PR43159:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX512VL-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
+; AVX512VL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX512VL-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: vmovd %xmm0, %edi
+; AVX512VL-NEXT: vpextrd $1, %xmm0, %esi
+; AVX512VL-NEXT: vpextrd $2, %xmm0, %edx
+; AVX512VL-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX512VL-NEXT: jmp foo # TAILCALL
+;
+; AVX512DQVL-LABEL: PR43159:
+; AVX512DQVL: # %bb.0: # %entry
+; AVX512DQVL-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
+; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX512DQVL-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
+; AVX512DQVL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX512DQVL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX512DQVL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX512DQVL-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
+; AVX512DQVL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX512DQVL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQVL-NEXT: vmovd %xmm0, %edi
+; AVX512DQVL-NEXT: vpextrd $1, %xmm0, %esi
+; AVX512DQVL-NEXT: vpextrd $2, %xmm0, %edx
+; AVX512DQVL-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX512DQVL-NEXT: jmp foo # TAILCALL
+entry:
+ %0 = load <4 x i32>, <4 x i32>* %a0, align 16
+ %div = udiv <4 x i32> %0, <i32 167, i32 237, i32 254, i32 177>
+ %ext0 = extractelement <4 x i32> %div, i32 0
+ %ext1 = extractelement <4 x i32> %div, i32 1
+ %ext2 = extractelement <4 x i32> %div, i32 2
+ %ext3 = extractelement <4 x i32> %div, i32 3
+ %call = tail call i32 @foo(i32 %ext0, i32 %ext1, i32 %ext2, i32 %ext3)
+ ret i32 %call
+}
+declare dso_local i32 @foo(i32, i32, i32, i32)
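
The assembly checked above comes from lowering udiv-by-constant as a multiply by a precomputed "magic" constant followed by shifts; that lowering is what introduces the PMULUDQ nodes this patch guards. A self-contained sketch of the general technique, using the textbook constant for division by 3 rather than the test's divisors:

#include <cassert>
#include <cstdint>

// Unsigned divide-by-constant via a 32x32->64 "magic" multiply:
// x / 3 == (x * 0xAAAAAAAB) >> 33 for all 32-bit x, because
// 0xAAAAAAAB == ceil(2^33 / 3).
static uint32_t divBy3(uint32_t X) {
  return uint32_t((uint64_t(X) * 0xAAAAAAABull) >> 33);
}

int main() {
  for (uint32_t X : {0u, 1u, 2u, 3u, 1000u, 0xFFFFFFFFu})
    assert(divBy3(X) == X / 3);
  return 0;
}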