[llvm] r289200 - [SelectionDAG] Add partial BITCAST support to computeKnownBits
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 9 02:13:46 PST 2016
Author: rksimon
Date: Fri Dec 9 04:13:45 2016
New Revision: 289200
URL: http://llvm.org/viewvc/llvm-project?rev=289200&view=rev
Log:
[SelectionDAG] Add partial BITCAST support to computeKnownBits
Adds support for bitcasting a little-endian 'small element' vector to a 'large element' scalar/vector (e.g. v16i8 to v4i32, or v2i32 to i64), which is required for PR30845. We extract the known bits for each 'small element' part and concatenate the results together.
We can add support for big-endian targets, and for 'large element' scalar/vector to 'small element' vector bitcasts, once we have test cases for them.
Differential Revision: https://reviews.llvm.org/D27129
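As a rough sketch of the concatenation described above (not the LLVM implementation; the KnownBits struct and the sample element values below are assumptions for illustration), here is a standalone C++ example showing how per-element known bits of a little-endian v2i32 combine into the known bits of the bitcast i64:

#include <cstdint>
#include <cstdio>

struct KnownBits {
  uint64_t Zero; // bits known to be 0
  uint64_t One;  // bits known to be 1
};

// Concatenate per-element known bits into the known bits of the wider type.
// On a little-endian target, element 0 occupies the low SubBitWidth bits.
KnownBits concatKnownBits(const KnownBits *Elts, unsigned NumElts,
                          unsigned SubBitWidth) {
  KnownBits Result = {0, 0};
  for (unsigned i = 0; i != NumElts; ++i) {
    Result.Zero |= Elts[i].Zero << (SubBitWidth * i);
    Result.One  |= Elts[i].One  << (SubBitWidth * i);
  }
  return Result;
}

int main() {
  // Suppose each i32 element of a v2i32 is known to have its top 16 bits
  // clear (e.g. both elements came from zero-extended i16 values).
  KnownBits Elts[2] = {{0xFFFF0000u, 0}, {0xFFFF0000u, 0}};
  KnownBits K = concatKnownBits(Elts, 2, 32);
  // For the bitcast i64, bits 16..31 and 48..63 are known zero:
  // KnownZero = 0xffff0000ffff0000.
  printf("KnownZero = 0x%016llx\n", (unsigned long long)K.Zero);
  return 0;
}

This is what lets the pmul.ll changes below fold away the cross partial products: with the upper 32 bits of each i64 lane known zero, the expanded 64-bit multiply reduces to a single pmuludq per lane.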
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/trunk/test/CodeGen/X86/pmul.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=289200&r1=289199&r2=289200&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Fri Dec 9 04:13:45 2016
@@ -2148,6 +2148,50 @@ void SelectionDAG::computeKnownBits(SDVa
}
break;
}
+ case ISD::BITCAST: {
+ SDValue N0 = Op.getOperand(0);
+ unsigned SubBitWidth = N0.getScalarValueSizeInBits();
+
+ // Ignore bitcasts from floating point.
+ if (!N0.getValueType().isInteger())
+ break;
+
+ // Fast handling of 'identity' bitcasts.
+ if (BitWidth == SubBitWidth) {
+ computeKnownBits(N0, KnownZero, KnownOne, DemandedElts, Depth + 1);
+ break;
+ }
+
+ // Support big-endian targets when it becomes useful.
+ bool IsLE = getDataLayout().isLittleEndian();
+ if (!IsLE)
+ break;
+
+ // Bitcast 'small element' vector to 'large element' scalar/vector.
+ if ((BitWidth % SubBitWidth) == 0) {
+ assert(N0.getValueType().isVector() && "Expected bitcast from vector");
+
+ // Collect known bits for the (larger) output by collecting the known
+ // bits from each set of sub elements and shift these into place.
+ // We need a separate computeKnownBits call for each set of sub
+ // elements, as the known bits for each set are likely to differ.
+ unsigned SubScale = BitWidth / SubBitWidth;
+ APInt SubDemandedElts(NumElts * SubScale, 0);
+ for (unsigned i = 0; i != NumElts; ++i)
+ if (DemandedElts[i])
+ SubDemandedElts.setBit(i * SubScale);
+
+ for (unsigned i = 0; i != SubScale; ++i) {
+ computeKnownBits(N0, KnownZero2, KnownOne2, SubDemandedElts.shl(i),
+ Depth + 1);
+ KnownOne |= KnownOne2.zext(BitWidth).shl(SubBitWidth * i);
+ KnownZero |= KnownZero2.zext(BitWidth).shl(SubBitWidth * i);
+ }
+ }
+
+ // TODO - support ((SubBitWidth % BitWidth) == 0) when it becomes useful.
+ break;
+ }
case ISD::AND:
// If either the LHS or the RHS are Zero, the result is zero.
computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
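The SubDemandedElts logic above maps each demanded element of the wide result onto the first sub-element of its group, then shifts the mask once per pass so that each pass queries a different sub-element of every demanded group. A minimal standalone sketch of that mapping (uint64_t standing in for APInt; the v16i8-to-v4i32 example demanding elements 0 and 2 is an assumption for illustration):

#include <cstdint>
#include <cstdio>

// Build the base sub-demanded mask: for each demanded wide element, set the
// bit of the first sub-element in its group.
uint64_t baseSubDemanded(uint64_t DemandedElts, unsigned NumElts,
                         unsigned SubScale) {
  uint64_t Sub = 0;
  for (unsigned i = 0; i != NumElts; ++i)
    if (DemandedElts & (1ull << i))
      Sub |= 1ull << (i * SubScale);
  return Sub;
}

int main() {
  // v4i32 result bitcast from a v16i8 source:
  // SubScale = BitWidth / SubBitWidth = 32 / 8 = 4.
  // Demand wide elements 0 and 2.
  unsigned NumElts = 4, SubScale = 4;
  uint64_t Demanded = 0x5; // 0b0101
  uint64_t Base = baseSubDemanded(Demanded, NumElts, SubScale);
  // Pass i shifts the mask left by i, selecting the i-th byte of each
  // demanded i32, so the four passes cover sub-elements {0,8}, {1,9},
  // {2,10} and {3,11} in turn.
  for (unsigned i = 0; i != SubScale; ++i)
    printf("pass %u: sub-demanded = 0x%04llx\n", i,
           (unsigned long long)(Base << i));
  return 0;
}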
Modified: llvm/trunk/test/CodeGen/X86/pmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmul.ll?rev=289200&r1=289199&r2=289200&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pmul.ll Fri Dec 9 04:13:45 2016
@@ -1153,71 +1153,31 @@ entry:
define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE2-LABEL: mul_v4i64_zero_upper:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pmuludq %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlq $32, %xmm5
-; SSE2-NEXT: pmuludq %xmm0, %xmm5
-; SSE2-NEXT: psllq $32, %xmm5
-; SSE2-NEXT: psrlq $32, %xmm0
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: psllq $32, %xmm0
-; SSE2-NEXT: paddq %xmm5, %xmm0
-; SSE2-NEXT: paddq %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pmuludq %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: psrlq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm2, %xmm4
-; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: psrlq $32, %xmm2
-; SSE2-NEXT: pmuludq %xmm3, %xmm2
-; SSE2-NEXT: psllq $32, %xmm2
-; SSE2-NEXT: paddq %xmm4, %xmm2
-; SSE2-NEXT: paddq %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: pmuludq %xmm0, %xmm1
+; SSE2-NEXT: pmuludq %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmuludq %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm5
-; SSE41-NEXT: psrlq $32, %xmm5
-; SSE41-NEXT: pmuludq %xmm0, %xmm5
-; SSE41-NEXT: psllq $32, %xmm5
-; SSE41-NEXT: psrlq $32, %xmm0
-; SSE41-NEXT: pmuludq %xmm1, %xmm0
-; SSE41-NEXT: psllq $32, %xmm0
-; SSE41-NEXT: paddq %xmm5, %xmm0
-; SSE41-NEXT: paddq %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: pmuludq %xmm4, %xmm1
-; SSE41-NEXT: movdqa %xmm4, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm3
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: psllq $32, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: pmuludq %xmm4, %xmm2
-; SSE41-NEXT: psllq $32, %xmm2
-; SSE41-NEXT: paddq %xmm3, %xmm2
-; SSE41-NEXT: paddq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,3,2,3]
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE41-NEXT: pmuludq %xmm0, %xmm1
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,3,2,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
@@ -1255,67 +1215,47 @@ entry:
define <4 x i32> @mul_v4i64_zero_upper_left(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-LABEL: mul_v4i64_zero_upper_left:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pmuludq %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlq $32, %xmm5
-; SSE2-NEXT: pmuludq %xmm0, %xmm5
-; SSE2-NEXT: psllq $32, %xmm5
-; SSE2-NEXT: psrlq $32, %xmm0
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: psllq $32, %xmm0
-; SSE2-NEXT: paddq %xmm5, %xmm0
-; SSE2-NEXT: paddq %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pmuludq %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: psrlq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm3, %xmm4
-; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: psrlq $32, %xmm3
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: psllq $32, %xmm3
-; SSE2-NEXT: paddq %xmm4, %xmm3
-; SSE2-NEXT: paddq %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: psrlq $32, %xmm1
+; SSE2-NEXT: pmuludq %xmm0, %xmm1
+; SSE2-NEXT: psllq $32, %xmm1
+; SSE2-NEXT: paddq %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: pmuludq %xmm2, %xmm0
+; SSE2-NEXT: psrlq $32, %xmm2
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: psllq $32, %xmm2
+; SSE2-NEXT: paddq %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper_left:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm4, %xmm4
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pmuludq %xmm1, %xmm4
-; SSE41-NEXT: movdqa %xmm1, %xmm5
-; SSE41-NEXT: psrlq $32, %xmm5
-; SSE41-NEXT: pmuludq %xmm3, %xmm5
-; SSE41-NEXT: psllq $32, %xmm5
-; SSE41-NEXT: psrlq $32, %xmm3
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE41-NEXT: movdqa %xmm4, %xmm3
; SSE41-NEXT: pmuludq %xmm1, %xmm3
-; SSE41-NEXT: psllq $32, %xmm3
-; SSE41-NEXT: paddq %xmm5, %xmm3
-; SSE41-NEXT: paddq %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pmuludq %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psrlq $32, %xmm4
-; SSE41-NEXT: pmuludq %xmm0, %xmm4
-; SSE41-NEXT: psllq $32, %xmm4
-; SSE41-NEXT: psrlq $32, %xmm0
-; SSE41-NEXT: pmuludq %xmm2, %xmm0
-; SSE41-NEXT: psllq $32, %xmm0
-; SSE41-NEXT: paddq %xmm4, %xmm0
-; SSE41-NEXT: paddq %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psrlq $32, %xmm1
+; SSE41-NEXT: pmuludq %xmm4, %xmm1
+; SSE41-NEXT: psllq $32, %xmm1
+; SSE41-NEXT: paddq %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: psrlq $32, %xmm2
+; SSE41-NEXT: pmuludq %xmm0, %xmm2
+; SSE41-NEXT: psllq $32, %xmm2
+; SSE41-NEXT: paddq %xmm3, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v4i64_zero_upper_left:
@@ -1357,72 +1297,40 @@ entry:
define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-LABEL: mul_v4i64_zero_lower:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [0,4294967295,0,4294967295]
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pmuludq %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlq $32, %xmm5
-; SSE2-NEXT: pmuludq %xmm0, %xmm5
-; SSE2-NEXT: psllq $32, %xmm5
-; SSE2-NEXT: psrlq $32, %xmm0
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: psllq $32, %xmm0
-; SSE2-NEXT: paddq %xmm5, %xmm0
-; SSE2-NEXT: paddq %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pmuludq %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: psrlq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm3, %xmm4
-; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: psrlq $32, %xmm3
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: psllq $32, %xmm3
-; SSE2-NEXT: paddq %xmm4, %xmm3
-; SSE2-NEXT: paddq %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,4294967295,0,4294967295]
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: psrlq $32, %xmm1
+; SSE2-NEXT: pmuludq %xmm0, %xmm1
+; SSE2-NEXT: psllq $32, %xmm1
+; SSE2-NEXT: psrlq $32, %xmm2
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: psllq $32, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_lower:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm4, %xmm4
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3],xmm4[4,5],xmm1[6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3],xmm4[4,5],xmm2[6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pmuludq %xmm2, %xmm4
-; SSE41-NEXT: movdqa %xmm2, %xmm5
-; SSE41-NEXT: psrlq $32, %xmm5
-; SSE41-NEXT: pmuludq %xmm0, %xmm5
-; SSE41-NEXT: psllq $32, %xmm5
-; SSE41-NEXT: psrlq $32, %xmm0
-; SSE41-NEXT: pmuludq %xmm2, %xmm0
-; SSE41-NEXT: psllq $32, %xmm0
-; SSE41-NEXT: paddq %xmm5, %xmm0
-; SSE41-NEXT: paddq %xmm4, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: psrlq $32, %xmm4
-; SSE41-NEXT: pmuludq %xmm3, %xmm4
-; SSE41-NEXT: psllq $32, %xmm4
-; SSE41-NEXT: psrlq $32, %xmm3
-; SSE41-NEXT: pmuludq %xmm1, %xmm3
-; SSE41-NEXT: psllq $32, %xmm3
-; SSE41-NEXT: paddq %xmm4, %xmm3
-; SSE41-NEXT: paddq %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; SSE41-NEXT: psrlq $32, %xmm2
+; SSE41-NEXT: pmuludq %xmm0, %xmm2
+; SSE41-NEXT: psllq $32, %xmm2
+; SSE41-NEXT: psrlq $32, %xmm1
+; SSE41-NEXT: pmuludq %xmm4, %xmm1
+; SSE41-NEXT: psllq $32, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v4i64_zero_lower:
@@ -1430,11 +1338,9 @@ define <4 x i32> @mul_v4i64_zero_lower(<
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
-; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
-; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
@@ -1447,11 +1353,9 @@ define <4 x i32> @mul_v4i64_zero_lower(<
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
-; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsllq $32, %ymm0, %ymm0
-; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
@@ -1469,131 +1373,51 @@ entry:
define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE2-LABEL: mul_v8i64_zero_upper:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm9
-; SSE2-NEXT: pmuludq %xmm3, %xmm9
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: psrlq $32, %xmm6
-; SSE2-NEXT: pmuludq %xmm1, %xmm6
-; SSE2-NEXT: psllq $32, %xmm6
-; SSE2-NEXT: psrlq $32, %xmm1
-; SSE2-NEXT: pmuludq %xmm3, %xmm1
-; SSE2-NEXT: psllq $32, %xmm1
-; SSE2-NEXT: paddq %xmm6, %xmm1
-; SSE2-NEXT: paddq %xmm9, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: pmuludq %xmm7, %xmm3
-; SSE2-NEXT: movdqa %xmm7, %xmm6
-; SSE2-NEXT: psrlq $32, %xmm6
-; SSE2-NEXT: pmuludq %xmm4, %xmm6
-; SSE2-NEXT: psllq $32, %xmm6
-; SSE2-NEXT: psrlq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm7, %xmm4
-; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: paddq %xmm6, %xmm4
-; SSE2-NEXT: paddq %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: psrlq $32, %xmm6
-; SSE2-NEXT: pmuludq %xmm0, %xmm6
-; SSE2-NEXT: psllq $32, %xmm6
-; SSE2-NEXT: psrlq $32, %xmm0
-; SSE2-NEXT: pmuludq %xmm2, %xmm0
-; SSE2-NEXT: psllq $32, %xmm0
-; SSE2-NEXT: paddq %xmm6, %xmm0
-; SSE2-NEXT: paddq %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm2
-; SSE2-NEXT: pmuludq %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm8, %xmm3
-; SSE2-NEXT: psrlq $32, %xmm3
-; SSE2-NEXT: pmuludq %xmm5, %xmm3
-; SSE2-NEXT: psllq $32, %xmm3
-; SSE2-NEXT: psrlq $32, %xmm5
-; SSE2-NEXT: pmuludq %xmm8, %xmm5
-; SSE2-NEXT: psllq $32, %xmm5
-; SSE2-NEXT: paddq %xmm3, %xmm5
-; SSE2-NEXT: paddq %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm8
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: pmuludq %xmm6, %xmm5
+; SSE2-NEXT: pmuludq %xmm0, %xmm2
+; SSE2-NEXT: pmuludq %xmm8, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,3,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i64_zero_upper:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm6, %xmm6
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm8 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm9 = xmm3[0],zero,xmm3[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; SSE41-NEXT: movdqa %xmm1, %xmm6
-; SSE41-NEXT: pmuludq %xmm3, %xmm6
-; SSE41-NEXT: movdqa %xmm3, %xmm7
-; SSE41-NEXT: psrlq $32, %xmm7
-; SSE41-NEXT: pmuludq %xmm1, %xmm7
-; SSE41-NEXT: psllq $32, %xmm7
-; SSE41-NEXT: psrlq $32, %xmm1
-; SSE41-NEXT: pmuludq %xmm3, %xmm1
-; SSE41-NEXT: psllq $32, %xmm1
-; SSE41-NEXT: paddq %xmm7, %xmm1
-; SSE41-NEXT: paddq %xmm6, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm2, %xmm6
-; SSE41-NEXT: psrlq $32, %xmm6
-; SSE41-NEXT: pmuludq %xmm0, %xmm6
-; SSE41-NEXT: psllq $32, %xmm6
-; SSE41-NEXT: psrlq $32, %xmm0
-; SSE41-NEXT: pmuludq %xmm2, %xmm0
-; SSE41-NEXT: psllq $32, %xmm0
-; SSE41-NEXT: paddq %xmm6, %xmm0
-; SSE41-NEXT: paddq %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm4, %xmm2
-; SSE41-NEXT: pmuludq %xmm9, %xmm2
-; SSE41-NEXT: movdqa %xmm9, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm3
-; SSE41-NEXT: pmuludq %xmm4, %xmm3
-; SSE41-NEXT: psllq $32, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm4
-; SSE41-NEXT: pmuludq %xmm9, %xmm4
-; SSE41-NEXT: psllq $32, %xmm4
-; SSE41-NEXT: paddq %xmm3, %xmm4
-; SSE41-NEXT: paddq %xmm2, %xmm4
-; SSE41-NEXT: movdqa %xmm5, %xmm2
-; SSE41-NEXT: pmuludq %xmm8, %xmm2
-; SSE41-NEXT: movdqa %xmm8, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm3
-; SSE41-NEXT: pmuludq %xmm5, %xmm3
-; SSE41-NEXT: psllq $32, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm5
-; SSE41-NEXT: pmuludq %xmm8, %xmm5
-; SSE41-NEXT: psllq $32, %xmm5
-; SSE41-NEXT: paddq %xmm3, %xmm5
-; SSE41-NEXT: paddq %xmm2, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,3,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm8 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm3[0],zero,xmm3[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE41-NEXT: pmuludq %xmm1, %xmm3
+; SSE41-NEXT: pmuludq %xmm0, %xmm2
+; SSE41-NEXT: pmuludq %xmm6, %xmm5
+; SSE41-NEXT: pmuludq %xmm8, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,1,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,3,2,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,3,2,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: retq
;
@@ -1605,24 +1429,8 @@ define <8 x i32> @mul_v8i64_zero_upper(<
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm4
-; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm5
-; AVX2-NEXT: vpmuludq %ymm5, %ymm0, %ymm5
-; AVX2-NEXT: vpsllq $32, %ymm5, %ymm5
-; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
-; AVX2-NEXT: vpaddq %ymm0, %ymm5, %ymm0
-; AVX2-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vpmuludq %ymm3, %ymm2, %ymm1
-; AVX2-NEXT: vpsrlq $32, %ymm3, %ymm4
-; AVX2-NEXT: vpmuludq %ymm4, %ymm2, %ymm4
-; AVX2-NEXT: vpsllq $32, %ymm4, %ymm4
-; AVX2-NEXT: vpsrlq $32, %ymm2, %ymm2
-; AVX2-NEXT: vpmuludq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
-; AVX2-NEXT: vpaddq %ymm2, %ymm4, %ymm2
-; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]