[llvm] r363802 - [TargetLowering] SimplifyDemandedBits SIGN_EXTEND_VECTOR_INREG -> ANY/ZERO_EXTEND_VECTOR_INREG
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 19 06:58:02 PDT 2019
Author: rksimon
Date: Wed Jun 19 06:58:02 2019
New Revision: 363802
URL: http://llvm.org/viewvc/llvm-project?rev=363802&view=rev
Log:
[TargetLowering] SimplifyDemandedBits SIGN_EXTEND_VECTOR_INREG -> ANY/ZERO_EXTEND_VECTOR_INREG
Simplify SIGN_EXTEND_VECTOR_INREG to ANY_EXTEND_VECTOR_INREG when the extended bits are not demanded, and to ZERO_EXTEND_VECTOR_INREG when the source sign bit is known zero.
Matches what we already do for SIGN_EXTEND.
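The reasoning is the same as for the scalar node: if no bit above the source width is demanded, the kind of extension is unobservable and an any-extend suffices; if the source sign bit is known zero, sign- and zero-extension produce the same value. A minimal standalone sketch of that decision (plain C++; the chooseExtendOpcode helper, the ExtOp enum and the integer masks standing in for APInt/KnownBits are made up for illustration) is:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the ISD extend opcodes involved in the fold.
enum class ExtOp { SignExtend, ZeroExtend, AnyExtend };

// Decide how a sign-extend from InBits to a wider type can be relaxed,
// given which result bits are demanded and whether the source sign bit
// is known to be zero.
ExtOp chooseExtendOpcode(uint64_t DemandedBits, unsigned InBits,
                         bool SignBitKnownZero) {
  // Position of the highest demanded bit plus one
  // (the equivalent of APInt::getActiveBits).
  unsigned ActiveBits = 0;
  for (uint64_t M = DemandedBits; M != 0; M >>= 1)
    ++ActiveBits;

  // No demanded bit lies above the source width: the extended bits are
  // never observed, so an any-extend is enough.
  if (ActiveBits <= InBits)
    return ExtOp::AnyExtend;

  // The copied sign bit is known zero, so sign- and zero-extension agree.
  if (SignBitKnownZero)
    return ExtOp::ZeroExtend;

  return ExtOp::SignExtend;
}

int main() {
  // Only the low 32 bits of an i32->i64 sign-extend are demanded -> any-extend.
  printf("%d\n", static_cast<int>(chooseExtendOpcode(0xFFFFFFFFull, 32, false)));
  // Source sign bit known zero -> zero-extend.
  printf("%d\n", static_cast<int>(chooseExtendOpcode(~0ull, 32, true)));
  return 0;
}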
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/trunk/test/CodeGen/X86/pmul.ll
llvm/trunk/test/CodeGen/X86/xop-ifma.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=363802&r1=363801&r2=363802&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp Wed Jun 19 06:58:02 2019
@@ -1413,9 +1413,11 @@ bool TargetLowering::SimplifyDemandedBit
bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG;
// If none of the top bits are demanded, convert this into an any_extend.
- // TODO: Add SIGN_EXTEND_VECTOR_INREG - ANY_EXTEND_VECTOR_INREG fold.
- if (DemandedBits.getActiveBits() <= InBits && !IsVecInReg)
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, Src));
+ if (DemandedBits.getActiveBits() <= InBits)
+ return TLO.CombineTo(
+ Op, TLO.DAG.getNode(IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG
+ : ISD::ANY_EXTEND,
+ dl, VT, Src));
APInt InDemandedBits = DemandedBits.trunc(InBits);
APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
@@ -1434,9 +1436,11 @@ bool TargetLowering::SimplifyDemandedBit
Known = Known.sext(BitWidth);
// If the sign bit is known zero, convert this to a zero extend.
- // TODO: Add SIGN_EXTEND_VECTOR_INREG - ZERO_EXTEND_VECTOR_INREG fold.
- if (Known.isNonNegative() && !IsVecInReg)
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Src));
+ if (Known.isNonNegative())
+ return TLO.CombineTo(
+ Op, TLO.DAG.getNode(IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG
+ : ISD::ZERO_EXTEND,
+ dl, VT, Src));
break;
}
case ISD::ANY_EXTEND: {
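For reference, the *_EXTEND_VECTOR_INREG nodes take a source vector of the same total width as the result and extend only its low elements, one per result lane, which is why the fold needs the in-reg opcodes rather than the plain ones. A small standalone model of that behaviour (illustrative only; the extendVectorInReg helper and the v4i32 -> v2i64 element counts are chosen just for the example) could look like:

#include <array>
#include <cstdint>
#include <cstdio>

// Model of SIGN/ZERO_EXTEND_VECTOR_INREG for v4i32 -> v2i64: source and
// result occupy the same 128 bits, and only the low two i32 elements of
// the source are extended into the two i64 result lanes.
std::array<int64_t, 2> extendVectorInReg(const std::array<int32_t, 4> &Src,
                                         bool Signed) {
  std::array<int64_t, 2> Result;
  for (unsigned I = 0; I != 2; ++I)
    Result[I] = Signed
                    ? static_cast<int64_t>(Src[I])                          // sext
                    : static_cast<int64_t>(static_cast<uint32_t>(Src[I]));  // zext
  return Result;
}

int main() {
  std::array<int32_t, 4> Src = {-1, 2, 3, 4};
  auto S = extendVectorInReg(Src, /*Signed=*/true);   // {-1, 2}
  auto Z = extendVectorInReg(Src, /*Signed=*/false);  // {4294967295, 2}
  printf("%lld %lld\n", (long long)S[0], (long long)Z[0]);
  return 0;
}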
Modified: llvm/trunk/test/CodeGen/X86/pmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmul.ll?rev=363802&r1=363801&r2=363802&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pmul.ll Wed Jun 19 06:58:02 2019
@@ -1326,15 +1326,13 @@ define <8 x i64> @mul_v8i64_sext(<8 x i1
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovsxwq %xmm3, %xmm6
; SSE41-NEXT: pmovsxwq %xmm0, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
; SSE41-NEXT: pmuldq %xmm4, %xmm3
-; SSE41-NEXT: pmovsxdq %xmm2, %xmm2
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; SSE41-NEXT: pmuldq %xmm5, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,2,3,3]
; SSE41-NEXT: pmuldq %xmm6, %xmm4
-; SSE41-NEXT: pmovsxdq %xmm1, %xmm0
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pmuldq %xmm7, %xmm0
; SSE41-NEXT: movdqa %xmm4, %xmm1
; SSE41-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/xop-ifma.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/xop-ifma.ll?rev=363802&r1=363801&r2=363802&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/xop-ifma.ll (original)
+++ llvm/trunk/test/CodeGen/X86/xop-ifma.ll Wed Jun 19 06:58:02 2019
@@ -67,12 +67,10 @@ define <8 x i32> @test_mul_v8i32_add_v8i
define <4 x i64> @test_mulx_v4i32_add_v4i64(<4 x i32> %a0, <4 x i32> %a1, <4 x i64> %a2) {
; XOP-AVX1-LABEL: test_mulx_v4i32_add_v4i64:
; XOP-AVX1: # %bb.0:
-; XOP-AVX1-NEXT: vpmovsxdq %xmm0, %xmm3
-; XOP-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; XOP-AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; XOP-AVX1-NEXT: vpmovsxdq %xmm1, %xmm4
-; XOP-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; XOP-AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; XOP-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
+; XOP-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; XOP-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
+; XOP-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; XOP-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; XOP-AVX1-NEXT: vpmacsdql %xmm5, %xmm1, %xmm0, %xmm0
; XOP-AVX1-NEXT: vpmacsdql %xmm2, %xmm4, %xmm3, %xmm1