[llvm] r351819 - [X86][SSE] Canonicalize OR(AND(X, C), AND(Y, ~C)) -> OR(AND(X, C), ANDNP(C, Y))
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 22 05:44:49 PST 2019
Author: rksimon
Date: Tue Jan 22 05:44:49 2019
New Revision: 351819
URL: http://llvm.org/viewvc/llvm-project?rev=351819&view=rev
Log:
[X86][SSE] Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
For constant bit-select patterns, replace one AND with an ANDNP, allowing us to reuse the constant mask. Only do this if the mask has multiple uses (to avoid losing load folding) or if we have XOP, as its VPCMOV can handle most folding commutations.
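As a rough standalone illustration of the identity being exploited (a hypothetical C++ snippet, not part of the patch; andn() here merely models X86ISD::ANDNP on scalars):

#include <cassert>
#include <cstdint>

// Models X86ISD::ANDNP / the andnps-pandn instructions: ANDN(A, B) = ~A & B.
static uint64_t andn(uint64_t A, uint64_t B) { return ~A & B; }

int main() {
  const uint64_t C = 0x00FF00FF00FF00FFULL; // constant select mask
  uint64_t X = 0x1122334455667788ULL;
  uint64_t Y = 0x99AABBCCDDEEFF00ULL;
  // Original form needs two constants: C and ~C.
  uint64_t Before = (X & C) | (Y & ~C);
  // Canonical form reuses the single constant C via ANDN.
  uint64_t After = (X & C) | andn(C, Y);
  assert(Before == After);
  return 0;
}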
This also requires computeKnownBitsForTargetNode support for X86ISD::ANDNP and X86ISD::FOR to prevent regressions in fabs/fcopysign patterns.
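A simplified model of the known-bits rules added for those two nodes might look like the following (illustrative only; the names Known, knownAndNot and knownOr are made up for this sketch — the in-tree code uses llvm::KnownBits, as shown in the diff below):

#include <cassert>
#include <cstdint>

// Minimal stand-in for llvm::KnownBits on a 64-bit value:
// each bit may be known-zero, known-one, or unknown (neither set).
struct Known {
  uint64_t Zero;
  uint64_t One;
};

// ANDNP(A, B) = ~A & B: a result bit is known one only where B is one and
// A is zero; it is known zero where B is zero or A is one.
static Known knownAndNot(Known A, Known B) {
  return {B.Zero | A.One, B.One & A.Zero};
}

// FOR(A, B) = A | B: known zero only where both are zero; known one where
// either is one.
static Known knownOr(Known A, Known B) {
  return {A.Zero & B.Zero, A.One | B.One};
}

int main() {
  // A: low byte known-one, upper bits known-zero. B: low byte known-zero.
  Known A{~0xFFULL, 0xFFULL};
  Known B{0xFFULL, 0};
  Known R = knownAndNot(A, B);
  assert((R.Zero & 0xFF) == 0xFF); // A's known-one low byte forces zeros in ~A & B
  Known S = knownOr(A, B);
  assert((S.One & 0xFF) == 0xFF);  // A's known-one low byte survives the OR
  return 0;
}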
Differential Revision: https://reviews.llvm.org/D55935
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/bitreverse.ll
llvm/trunk/test/CodeGen/X86/combine-bitselect.ll
llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll
llvm/trunk/test/CodeGen/X86/vec-copysign.ll
llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll
llvm/trunk/test/CodeGen/X86/vector-fshl-256.ll
llvm/trunk/test/CodeGen/X86/vector-fshl-512.ll
llvm/trunk/test/CodeGen/X86/vector-fshl-rot-256.ll
llvm/trunk/test/CodeGen/X86/vector-fshl-rot-512.ll
llvm/trunk/test/CodeGen/X86/vector-fshr-256.ll
llvm/trunk/test/CodeGen/X86/vector-fshr-512.ll
llvm/trunk/test/CodeGen/X86/vector-fshr-rot-256.ll
llvm/trunk/test/CodeGen/X86/vector-fshr-rot-512.ll
llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll
llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Jan 22 05:44:49 2019
@@ -30233,6 +30233,27 @@ void X86TargetLowering::computeKnownBits
Known = Known.trunc(BitWidth);
break;
}
+ case X86ISD::ANDNP: {
+ KnownBits Known2;
+ Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+
+ // ANDNP = (~X & Y);
+ Known.One &= Known2.Zero;
+ Known.Zero |= Known2.One;
+ break;
+ }
+ case X86ISD::FOR: {
+ KnownBits Known2;
+ Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+
+ // Output known-0 bits are only known if clear in both the LHS & RHS.
+ Known.Zero &= Known2.Zero;
+ // Output known-1 are known to be set if set in either the LHS | RHS.
+ Known.One |= Known2.One;
+ break;
+ }
case X86ISD::CMOV: {
Known = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
// If we don't know any bits, early out.
@@ -36519,6 +36540,52 @@ static SDValue combineAnd(SDNode *N, Sel
return SDValue();
}
+// Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
+static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
+
+ EVT VT = N->getValueType(0);
+ if (!VT.isVector())
+ return SDValue();
+
+ SDValue N0 = peekThroughBitcasts(N->getOperand(0));
+ SDValue N1 = peekThroughBitcasts(N->getOperand(1));
+ if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
+ return SDValue();
+
+ // On XOP we'll lower to PCMOV so accept one use, otherwise only
+ // do this if either mask has multiple uses already.
+ if (!(Subtarget.hasXOP() || !N0.getOperand(1).hasOneUse() ||
+ !N1.getOperand(1).hasOneUse()))
+ return SDValue();
+
+ // Attempt to extract constant byte masks.
+ APInt UndefElts0, UndefElts1;
+ SmallVector<APInt, 32> EltBits0, EltBits1;
+ if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
+ false, false))
+ return SDValue();
+ if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
+ false, false))
+ return SDValue();
+
+ for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
+ // TODO - add UNDEF elts support.
+ if (UndefElts0[i] || UndefElts1[i])
+ return SDValue();
+ if (EltBits0[i] != ~EltBits1[i])
+ return SDValue();
+ }
+
+ SDLoc DL(N);
+ SDValue X = N->getOperand(0);
+ SDValue Y =
+ DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
+ DAG.getBitcast(VT, N1.getOperand(0)));
+ return DAG.getNode(ISD::OR, DL, VT, X, Y);
+}
+
// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
if (N->getOpcode() != ISD::OR)
@@ -36781,6 +36848,9 @@ static SDValue combineOr(SDNode *N, Sele
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
+ if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
+ return R;
+
if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
return R;
Modified: llvm/trunk/test/CodeGen/X86/bitreverse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitreverse.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitreverse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bitreverse.ll Tue Jan 22 05:44:49 2019
@@ -61,18 +61,17 @@ define <2 x i16> @test_bitreverse_v2i16(
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; X64-NEXT: packuswb %xmm2, %xmm0
-; X64-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: pand %xmm1, %xmm2
+; X64-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,7,6,5,4]
+; X64-NEXT: packuswb %xmm2, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X64-NEXT: movdqa %xmm1, %xmm2
+; X64-NEXT: pand %xmm0, %xmm2
; X64-NEXT: psllw $4, %xmm2
-; X64-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; X64-NEXT: pand %xmm3, %xmm2
-; X64-NEXT: pand %xmm3, %xmm0
-; X64-NEXT: psrlw $4, %xmm0
-; X64-NEXT: pand %xmm1, %xmm0
-; X64-NEXT: por %xmm2, %xmm0
+; X64-NEXT: pand {{.*}}(%rip), %xmm1
+; X64-NEXT: psrlw $4, %xmm1
+; X64-NEXT: pand %xmm0, %xmm1
+; X64-NEXT: pandn %xmm2, %xmm0
+; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X64-NEXT: pand %xmm0, %xmm1
; X64-NEXT: psllw $2, %xmm1
Modified: llvm/trunk/test/CodeGen/X86/combine-bitselect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-bitselect.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-bitselect.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-bitselect.ll Tue Jan 22 05:44:49 2019
@@ -19,9 +19,7 @@ define <2 x i64> @bitselect_v2i64_rr(<2
;
; XOP-LABEL: bitselect_v2i64_rr:
; XOP: # %bb.0:
-; XOP-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
-; XOP-NEXT: vorps %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vpcmov {{.*}}(%rip), %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v2i64_rr:
@@ -47,10 +45,8 @@ define <2 x i64> @bitselect_v2i64_rm(<2
;
; XOP-LABEL: bitselect_v2i64_rm:
; XOP: # %bb.0:
-; XOP-NEXT: vmovaps (%rdi), %xmm1
-; XOP-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
-; XOP-NEXT: vorps %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vmovdqa (%rdi), %xmm1
+; XOP-NEXT: vpcmov {{.*}}(%rip), %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v2i64_rm:
@@ -78,10 +74,8 @@ define <2 x i64> @bitselect_v2i64_mr(<2
;
; XOP-LABEL: bitselect_v2i64_mr:
; XOP: # %bb.0:
-; XOP-NEXT: vmovaps (%rdi), %xmm1
-; XOP-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
-; XOP-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: vorps %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vmovdqa (%rdi), %xmm1
+; XOP-NEXT: vpcmov {{.*}}(%rip), %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v2i64_mr:
@@ -110,11 +104,9 @@ define <2 x i64> @bitselect_v2i64_mm(<2
;
; XOP-LABEL: bitselect_v2i64_mm:
; XOP: # %bb.0:
-; XOP-NEXT: vmovaps (%rdi), %xmm0
-; XOP-NEXT: vmovaps (%rsi), %xmm1
-; XOP-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
-; XOP-NEXT: vorps %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vmovdqa (%rsi), %xmm0
+; XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551612,18446744065119617022]
+; XOP-NEXT: vpcmov %xmm1, (%rdi), %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v2i64_mm:
@@ -150,9 +142,7 @@ define <4 x i64> @bitselect_v4i64_rr(<4
;
; XOP-LABEL: bitselect_v4i64_rr:
; XOP: # %bb.0:
-; XOP-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; XOP-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
-; XOP-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOP-NEXT: vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v4i64_rr:
@@ -170,23 +160,24 @@ define <4 x i64> @bitselect_v4i64_rr(<4
define <4 x i64> @bitselect_v4i64_rm(<4 x i64>, <4 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v4i64_rm:
; SSE: # %bb.0:
-; SSE-NEXT: movaps {{.*#+}} xmm2 = [8589934593,3]
-; SSE-NEXT: andps %xmm2, %xmm1
-; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm2 = [18446744065119617022,18446744073709551612]
-; SSE-NEXT: movaps 16(%rdi), %xmm3
-; SSE-NEXT: andps %xmm2, %xmm3
-; SSE-NEXT: orps %xmm3, %xmm1
-; SSE-NEXT: andps (%rdi), %xmm2
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: movaps 16(%rdi), %xmm4
+; SSE-NEXT: andps %xmm2, %xmm4
+; SSE-NEXT: movaps (%rdi), %xmm5
+; SSE-NEXT: andps %xmm2, %xmm5
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andnps %xmm0, %xmm3
+; SSE-NEXT: orps %xmm5, %xmm3
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm4, %xmm2
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v4i64_rm:
; XOP: # %bb.0:
-; XOP-NEXT: vmovaps (%rdi), %ymm1
-; XOP-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; XOP-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
-; XOP-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOP-NEXT: vmovdqa (%rdi), %ymm1
+; XOP-NEXT: vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v4i64_rm:
@@ -207,22 +198,23 @@ define <4 x i64> @bitselect_v4i64_mr(<4
; SSE-LABEL: bitselect_v4i64_mr:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [12884901890,4294967296]
-; SSE-NEXT: movaps 16(%rdi), %xmm3
-; SSE-NEXT: andps %xmm2, %xmm3
-; SSE-NEXT: andps (%rdi), %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm4 = [18446744060824649725,18446744069414584319]
-; SSE-NEXT: andps %xmm4, %xmm1
-; SSE-NEXT: orps %xmm3, %xmm1
-; SSE-NEXT: andps %xmm4, %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: movaps 16(%rdi), %xmm4
+; SSE-NEXT: andps %xmm2, %xmm4
+; SSE-NEXT: movaps (%rdi), %xmm5
+; SSE-NEXT: andps %xmm2, %xmm5
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andnps %xmm0, %xmm3
+; SSE-NEXT: orps %xmm5, %xmm3
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm4, %xmm2
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v4i64_mr:
; XOP: # %bb.0:
-; XOP-NEXT: vmovaps (%rdi), %ymm1
-; XOP-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
-; XOP-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; XOP-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOP-NEXT: vmovdqa (%rdi), %ymm1
+; XOP-NEXT: vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v4i64_mr:
@@ -242,25 +234,23 @@ define <4 x i64> @bitselect_v4i64_mr(<4
define <4 x i64> @bitselect_v4i64_mm(<4 x i64>* nocapture readonly, <4 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v4i64_mm:
; SSE: # %bb.0:
-; SSE-NEXT: movaps {{.*#+}} xmm2 = [3,8589934593]
-; SSE-NEXT: movaps 16(%rdi), %xmm3
-; SSE-NEXT: andps %xmm2, %xmm3
-; SSE-NEXT: andps (%rdi), %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551612,18446744065119617022]
-; SSE-NEXT: movaps 16(%rsi), %xmm1
-; SSE-NEXT: andps %xmm0, %xmm1
-; SSE-NEXT: orps %xmm3, %xmm1
-; SSE-NEXT: andps (%rsi), %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: movaps {{.*#+}} xmm1 = [18446744073709551612,18446744065119617022]
+; SSE-NEXT: movaps 16(%rsi), %xmm2
+; SSE-NEXT: andps %xmm1, %xmm2
+; SSE-NEXT: movaps (%rsi), %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: andnps (%rdi), %xmm0
+; SSE-NEXT: orps %xmm3, %xmm0
+; SSE-NEXT: andnps 16(%rdi), %xmm1
+; SSE-NEXT: orps %xmm2, %xmm1
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v4i64_mm:
; XOP: # %bb.0:
-; XOP-NEXT: vmovaps (%rdi), %ymm0
-; XOP-NEXT: vmovaps (%rsi), %ymm1
-; XOP-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; XOP-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
-; XOP-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOP-NEXT: vmovdqa (%rsi), %ymm0
+; XOP-NEXT: vmovdqa {{.*#+}} ymm1 = [18446744073709551612,18446744065119617022,18446744073709551612,18446744065119617022]
+; XOP-NEXT: vpcmov %ymm1, (%rdi), %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v4i64_mm:
@@ -286,58 +276,55 @@ define <4 x i64> @bitselect_v4i64_mm(<4
define <8 x i64> @bitselect_v8i64_rr(<8 x i64>, <8 x i64>) {
; SSE-LABEL: bitselect_v8i64_rr:
; SSE: # %bb.0:
-; SSE-NEXT: movaps {{.*#+}} xmm8 = [12884901890,12884901890]
-; SSE-NEXT: andps %xmm8, %xmm3
-; SSE-NEXT: movaps {{.*#+}} xmm9 = [4294967296,12884901890]
-; SSE-NEXT: andps %xmm9, %xmm2
-; SSE-NEXT: andps %xmm8, %xmm1
-; SSE-NEXT: andps %xmm9, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm8 = [18446744060824649725,18446744060824649725]
; SSE-NEXT: andps %xmm8, %xmm7
-; SSE-NEXT: orps %xmm7, %xmm3
-; SSE-NEXT: movaps {{.*#+}} xmm7 = [18446744069414584319,18446744060824649725]
-; SSE-NEXT: andps %xmm7, %xmm6
-; SSE-NEXT: orps %xmm6, %xmm2
-; SSE-NEXT: andps %xmm5, %xmm8
-; SSE-NEXT: orps %xmm8, %xmm1
-; SSE-NEXT: andps %xmm4, %xmm7
-; SSE-NEXT: orps %xmm7, %xmm0
+; SSE-NEXT: movaps {{.*#+}} xmm9 = [18446744069414584319,18446744060824649725]
+; SSE-NEXT: andps %xmm9, %xmm6
+; SSE-NEXT: andps %xmm8, %xmm5
+; SSE-NEXT: andps %xmm9, %xmm4
+; SSE-NEXT: movaps %xmm9, %xmm10
+; SSE-NEXT: andnps %xmm0, %xmm10
+; SSE-NEXT: orps %xmm4, %xmm10
+; SSE-NEXT: movaps %xmm8, %xmm4
+; SSE-NEXT: andnps %xmm1, %xmm4
+; SSE-NEXT: orps %xmm5, %xmm4
+; SSE-NEXT: andnps %xmm2, %xmm9
+; SSE-NEXT: orps %xmm6, %xmm9
+; SSE-NEXT: andnps %xmm3, %xmm8
+; SSE-NEXT: orps %xmm7, %xmm8
+; SSE-NEXT: movaps %xmm10, %xmm0
+; SSE-NEXT: movaps %xmm4, %xmm1
+; SSE-NEXT: movaps %xmm9, %xmm2
+; SSE-NEXT: movaps %xmm8, %xmm3
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v8i64_rr:
; XOP: # %bb.0:
-; XOP-NEXT: vmovaps {{.*#+}} ymm4 = [4294967296,12884901890,12884901890,12884901890]
-; XOP-NEXT: vandps %ymm4, %ymm1, %ymm1
-; XOP-NEXT: vandps %ymm4, %ymm0, %ymm0
-; XOP-NEXT: vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
-; XOP-NEXT: vandps %ymm4, %ymm3, %ymm3
-; XOP-NEXT: vorps %ymm1, %ymm3, %ymm1
-; XOP-NEXT: vandps %ymm4, %ymm2, %ymm2
-; XOP-NEXT: vorps %ymm0, %ymm2, %ymm0
+; XOP-NEXT: vmovdqa {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
+; XOP-NEXT: vpcmov %ymm4, %ymm0, %ymm2, %ymm0
+; XOP-NEXT: vpcmov %ymm4, %ymm1, %ymm3, %ymm1
; XOP-NEXT: retq
;
; AVX1-LABEL: bitselect_v8i64_rr:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [4294967296,12884901890,12884901890,12884901890]
-; AVX1-NEXT: vandps %ymm4, %ymm1, %ymm1
-; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
; AVX1-NEXT: vandps %ymm4, %ymm3, %ymm3
-; AVX1-NEXT: vorps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vandps %ymm4, %ymm2, %ymm2
+; AVX1-NEXT: vandnps %ymm0, %ymm4, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vandnps %ymm1, %ymm4, %ymm1
+; AVX1-NEXT: vorps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitselect_v8i64_rr:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovaps {{.*#+}} ymm4 = [4294967296,12884901890,12884901890,12884901890]
-; AVX2-NEXT: vandps %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vandps %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
; AVX2-NEXT: vandps %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vorps %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vandps %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vandnps %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vorps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vandnps %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vorps %ymm1, %ymm3, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: bitselect_v8i64_rr:
@@ -355,23 +342,30 @@ define <8 x i64> @bitselect_v8i64_rr(<8
define <8 x i64> @bitselect_v8i64_rm(<8 x i64>, <8 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v8i64_rm:
; SSE: # %bb.0:
-; SSE-NEXT: movaps {{.*#+}} xmm4 = [8589934593,3]
-; SSE-NEXT: andps %xmm4, %xmm3
-; SSE-NEXT: andps %xmm4, %xmm2
-; SSE-NEXT: andps %xmm4, %xmm1
-; SSE-NEXT: andps %xmm4, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm4 = [18446744065119617022,18446744073709551612]
-; SSE-NEXT: movaps 48(%rdi), %xmm5
-; SSE-NEXT: andps %xmm4, %xmm5
-; SSE-NEXT: orps %xmm5, %xmm3
-; SSE-NEXT: movaps 32(%rdi), %xmm5
-; SSE-NEXT: andps %xmm4, %xmm5
-; SSE-NEXT: orps %xmm5, %xmm2
-; SSE-NEXT: movaps 16(%rdi), %xmm5
-; SSE-NEXT: andps %xmm4, %xmm5
-; SSE-NEXT: orps %xmm5, %xmm1
-; SSE-NEXT: andps (%rdi), %xmm4
-; SSE-NEXT: orps %xmm4, %xmm0
+; SSE-NEXT: movaps 48(%rdi), %xmm8
+; SSE-NEXT: andps %xmm4, %xmm8
+; SSE-NEXT: movaps 32(%rdi), %xmm9
+; SSE-NEXT: andps %xmm4, %xmm9
+; SSE-NEXT: movaps 16(%rdi), %xmm7
+; SSE-NEXT: andps %xmm4, %xmm7
+; SSE-NEXT: movaps (%rdi), %xmm6
+; SSE-NEXT: andps %xmm4, %xmm6
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andnps %xmm0, %xmm5
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: movaps %xmm4, %xmm6
+; SSE-NEXT: andnps %xmm1, %xmm6
+; SSE-NEXT: orps %xmm7, %xmm6
+; SSE-NEXT: movaps %xmm4, %xmm7
+; SSE-NEXT: andnps %xmm2, %xmm7
+; SSE-NEXT: orps %xmm9, %xmm7
+; SSE-NEXT: andnps %xmm3, %xmm4
+; SSE-NEXT: orps %xmm8, %xmm4
+; SSE-NEXT: movaps %xmm5, %xmm0
+; SSE-NEXT: movaps %xmm6, %xmm1
+; SSE-NEXT: movaps %xmm7, %xmm2
+; SSE-NEXT: movaps %xmm4, %xmm3
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v8i64_rm:
@@ -434,22 +428,29 @@ define <8 x i64> @bitselect_v8i64_mr(<8
; SSE-LABEL: bitselect_v8i64_mr:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm4 = [12884901890,4294967296]
-; SSE-NEXT: movaps 48(%rdi), %xmm5
-; SSE-NEXT: andps %xmm4, %xmm5
-; SSE-NEXT: movaps 32(%rdi), %xmm6
-; SSE-NEXT: andps %xmm4, %xmm6
+; SSE-NEXT: movaps 48(%rdi), %xmm8
+; SSE-NEXT: andps %xmm4, %xmm8
+; SSE-NEXT: movaps 32(%rdi), %xmm9
+; SSE-NEXT: andps %xmm4, %xmm9
; SSE-NEXT: movaps 16(%rdi), %xmm7
; SSE-NEXT: andps %xmm4, %xmm7
-; SSE-NEXT: andps (%rdi), %xmm4
-; SSE-NEXT: movaps {{.*#+}} xmm8 = [18446744060824649725,18446744069414584319]
-; SSE-NEXT: andps %xmm8, %xmm3
-; SSE-NEXT: orps %xmm5, %xmm3
-; SSE-NEXT: andps %xmm8, %xmm2
-; SSE-NEXT: orps %xmm6, %xmm2
-; SSE-NEXT: andps %xmm8, %xmm1
-; SSE-NEXT: orps %xmm7, %xmm1
-; SSE-NEXT: andps %xmm8, %xmm0
-; SSE-NEXT: orps %xmm4, %xmm0
+; SSE-NEXT: movaps (%rdi), %xmm6
+; SSE-NEXT: andps %xmm4, %xmm6
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andnps %xmm0, %xmm5
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: movaps %xmm4, %xmm6
+; SSE-NEXT: andnps %xmm1, %xmm6
+; SSE-NEXT: orps %xmm7, %xmm6
+; SSE-NEXT: movaps %xmm4, %xmm7
+; SSE-NEXT: andnps %xmm2, %xmm7
+; SSE-NEXT: orps %xmm9, %xmm7
+; SSE-NEXT: andnps %xmm3, %xmm4
+; SSE-NEXT: orps %xmm8, %xmm4
+; SSE-NEXT: movaps %xmm5, %xmm0
+; SSE-NEXT: movaps %xmm6, %xmm1
+; SSE-NEXT: movaps %xmm7, %xmm2
+; SSE-NEXT: movaps %xmm4, %xmm3
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v8i64_mr:
@@ -511,26 +512,26 @@ define <8 x i64> @bitselect_v8i64_mr(<8
define <8 x i64> @bitselect_v8i64_mm(<8 x i64>* nocapture readonly, <8 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v8i64_mm:
; SSE: # %bb.0:
-; SSE-NEXT: movaps {{.*#+}} xmm4 = [3,8589934593]
-; SSE-NEXT: movaps 48(%rdi), %xmm1
-; SSE-NEXT: andps %xmm4, %xmm1
-; SSE-NEXT: movaps 32(%rdi), %xmm5
-; SSE-NEXT: andps %xmm4, %xmm5
-; SSE-NEXT: movaps 16(%rdi), %xmm6
-; SSE-NEXT: andps %xmm4, %xmm6
-; SSE-NEXT: andps (%rdi), %xmm4
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551612,18446744065119617022]
-; SSE-NEXT: movaps 48(%rsi), %xmm3
-; SSE-NEXT: andps %xmm0, %xmm3
-; SSE-NEXT: orps %xmm1, %xmm3
-; SSE-NEXT: movaps 32(%rsi), %xmm2
-; SSE-NEXT: andps %xmm0, %xmm2
+; SSE-NEXT: movaps {{.*#+}} xmm3 = [18446744073709551612,18446744065119617022]
+; SSE-NEXT: movaps 48(%rsi), %xmm4
+; SSE-NEXT: andps %xmm3, %xmm4
+; SSE-NEXT: movaps 32(%rsi), %xmm5
+; SSE-NEXT: andps %xmm3, %xmm5
+; SSE-NEXT: movaps 16(%rsi), %xmm2
+; SSE-NEXT: andps %xmm3, %xmm2
+; SSE-NEXT: movaps (%rsi), %xmm1
+; SSE-NEXT: andps %xmm3, %xmm1
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: andnps (%rdi), %xmm0
+; SSE-NEXT: orps %xmm1, %xmm0
+; SSE-NEXT: movaps %xmm3, %xmm1
+; SSE-NEXT: andnps 16(%rdi), %xmm1
+; SSE-NEXT: orps %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm3, %xmm2
+; SSE-NEXT: andnps 32(%rdi), %xmm2
; SSE-NEXT: orps %xmm5, %xmm2
-; SSE-NEXT: movaps 16(%rsi), %xmm1
-; SSE-NEXT: andps %xmm0, %xmm1
-; SSE-NEXT: orps %xmm6, %xmm1
-; SSE-NEXT: andps (%rsi), %xmm0
-; SSE-NEXT: orps %xmm4, %xmm0
+; SSE-NEXT: andnps 48(%rdi), %xmm3
+; SSE-NEXT: orps %xmm4, %xmm3
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v8i64_mm:
Modified: llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll Tue Jan 22 05:44:49 2019
@@ -194,37 +194,35 @@ define <4 x double> @combine_vec_fcopysi
; SSE-LABEL: combine_vec_fcopysign_fpext_sgn:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm2, %xmm3
-; SSE-NEXT: cvtss2sd %xmm2, %xmm4
-; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
-; SSE-NEXT: movaps %xmm2, %xmm6
-; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm2[1]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3]
-; SSE-NEXT: movaps {{.*#+}} xmm7
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: andps %xmm7, %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm8 = [-0.0E+0,-0.0E+0]
-; SSE-NEXT: andps %xmm8, %xmm4
-; SSE-NEXT: orps %xmm4, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT: andps %xmm7, %xmm0
-; SSE-NEXT: xorps %xmm4, %xmm4
-; SSE-NEXT: cvtss2sd %xmm5, %xmm4
-; SSE-NEXT: andps %xmm8, %xmm4
-; SSE-NEXT: orps %xmm0, %xmm4
-; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; SSE-NEXT: andps %xmm7, %xmm0
+; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; SSE-NEXT: cvtss2sd %xmm2, %xmm5
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: movaps {{.*#+}} xmm6
; SSE-NEXT: cvtss2sd %xmm3, %xmm3
-; SSE-NEXT: andps %xmm8, %xmm3
-; SSE-NEXT: orps %xmm0, %xmm3
-; SSE-NEXT: andps %xmm7, %xmm1
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: cvtss2sd %xmm6, %xmm0
-; SSE-NEXT: andps %xmm8, %xmm0
-; SSE-NEXT: orps %xmm0, %xmm1
-; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: movaps %xmm6, %xmm7
+; SSE-NEXT: andnps %xmm3, %xmm7
+; SSE-NEXT: movaps %xmm1, %xmm3
+; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; SSE-NEXT: andps %xmm6, %xmm3
+; SSE-NEXT: orps %xmm3, %xmm7
+; SSE-NEXT: andps %xmm6, %xmm1
+; SSE-NEXT: cvtss2sd %xmm2, %xmm2
+; SSE-NEXT: movaps %xmm6, %xmm3
+; SSE-NEXT: andnps %xmm2, %xmm3
+; SSE-NEXT: orps %xmm3, %xmm1
+; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm7[0]
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-NEXT: andps %xmm6, %xmm2
+; SSE-NEXT: xorps %xmm3, %xmm3
+; SSE-NEXT: cvtss2sd %xmm4, %xmm3
+; SSE-NEXT: andps %xmm6, %xmm0
+; SSE-NEXT: andnps %xmm3, %xmm6
+; SSE-NEXT: orps %xmm2, %xmm6
+; SSE-NEXT: andps {{.*}}(%rip), %xmm5
+; SSE-NEXT: orps %xmm5, %xmm0
+; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fpext_sgn:
@@ -246,35 +244,37 @@ define <4 x float> @combine_vec_fcopysig
; SSE-LABEL: combine_vec_fcopysign_fptrunc_sgn:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: movaps {{.*#+}} xmm5
-; SSE-NEXT: andps %xmm5, %xmm0
-; SSE-NEXT: cvtsd2ss %xmm1, %xmm6
-; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; SSE-NEXT: andps %xmm4, %xmm6
-; SSE-NEXT: orps %xmm6, %xmm0
-; SSE-NEXT: movshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
-; SSE-NEXT: andps %xmm5, %xmm6
+; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
+; SSE-NEXT: movaps {{.*#+}} xmm4
+; SSE-NEXT: andps %xmm4, %xmm3
+; SSE-NEXT: cvtsd2ss %xmm2, %xmm5
+; SSE-NEXT: movaps %xmm4, %xmm6
+; SSE-NEXT: andnps %xmm5, %xmm6
+; SSE-NEXT: orps %xmm3, %xmm6
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: andps %xmm4, %xmm3
+; SSE-NEXT: xorps %xmm5, %xmm5
+; SSE-NEXT: cvtsd2ss %xmm1, %xmm5
+; SSE-NEXT: movaps %xmm4, %xmm7
+; SSE-NEXT: andnps %xmm5, %xmm7
+; SSE-NEXT: orps %xmm7, %xmm3
+; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: cvtsd2ss %xmm1, %xmm1
-; SSE-NEXT: andps %xmm4, %xmm1
-; SSE-NEXT: orps %xmm6, %xmm1
-; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
-; SSE-NEXT: andps %xmm5, %xmm1
-; SSE-NEXT: xorps %xmm6, %xmm6
-; SSE-NEXT: cvtsd2ss %xmm2, %xmm6
-; SSE-NEXT: andps %xmm4, %xmm6
-; SSE-NEXT: orps %xmm1, %xmm6
-; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0],xmm0[3]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
-; SSE-NEXT: andps %xmm5, %xmm3
+; SSE-NEXT: andps {{.*}}(%rip), %xmm1
+; SSE-NEXT: orps %xmm5, %xmm1
+; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1],xmm6[0],xmm3[3]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-NEXT: andps %xmm4, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsd2ss %xmm2, %xmm1
-; SSE-NEXT: andps %xmm4, %xmm1
-; SSE-NEXT: orps %xmm3, %xmm1
-; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; SSE-NEXT: andnps %xmm1, %xmm4
+; SSE-NEXT: orps %xmm0, %xmm4
+; SSE-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fptrunc_sgn:
Modified: llvm/trunk/test/CodeGen/X86/vec-copysign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec-copysign.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec-copysign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec-copysign.ll Tue Jan 22 05:44:49 2019
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse2 | FileCheck %s --check-prefix=SSE2 --check-prefix=CHECK
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX
; Assertions have been enhanced from utils/update_llc_test_checks.py to show the constant pool values.
; Use a macosx triple to make sure the format of those constant strings is exact.
@@ -35,12 +35,6 @@ define <4 x float> @v4f32(<4 x float> %a
ret <4 x float> %tmp
}
-; SSE2: [[SIGNMASK2:L.+]]:
-; SSE2-NEXT: .long 2147483648
-; SSE2-NEXT: .long 2147483648
-; SSE2-NEXT: .long 2147483648
-; SSE2-NEXT: .long 2147483648
-
; SSE2: [[MAGMASK2:L.+]]:
; SSE2-NEXT: .long 2147483647
; SSE2-NEXT: .long 2147483647
@@ -70,14 +64,14 @@ define <4 x float> @v4f32(<4 x float> %a
define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
; SSE2-LABEL: v8f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movaps [[SIGNMASK2]](%rip), %xmm4
-; SSE2-NEXT: andps %xmm4, %xmm2
-; SSE2-NEXT: movaps [[MAGMASK2]](%rip), %xmm5
-; SSE2-NEXT: andps %xmm5, %xmm0
-; SSE2-NEXT: orps %xmm2, %xmm0
-; SSE2-NEXT: andps %xmm4, %xmm3
-; SSE2-NEXT: andps %xmm5, %xmm1
-; SSE2-NEXT: orps %xmm3, %xmm1
+; SSE2-NEXT: movaps [[MAGMASK2]](%rip), %xmm4
+; SSE2-NEXT: movaps %xmm4, %xmm5
+; SSE2-NEXT: andnps %xmm2, %xmm5
+; SSE2-NEXT: andps %xmm4, %xmm0
+; SSE2-NEXT: orps %xmm5, %xmm0
+; SSE2-NEXT: andps %xmm4, %xmm1
+; SSE2-NEXT: andnps %xmm3, %xmm4
+; SSE2-NEXT: orps %xmm4, %xmm1
; SSE2-NEXT: retq
;
; AVX-LABEL: v8f32:
@@ -118,10 +112,6 @@ define <2 x double> @v2f64(<2 x double>
ret <2 x double> %tmp
}
-; SSE2: [[SIGNMASK4:L.+]]:
-; SSE2-NEXT: .quad -9223372036854775808
-; SSE2-NEXT: .quad -9223372036854775808
-
; SSE2: [[MAGMASK4:L.+]]:
; SSE2-NEXT: .quad 9223372036854775807
; SSE2-NEXT: .quad 9223372036854775807
@@ -141,14 +131,14 @@ define <2 x double> @v2f64(<2 x double>
define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
; SSE2-LABEL: v4f64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movaps [[SIGNMASK4]](%rip), %xmm4
-; SSE2-NEXT: andps %xmm4, %xmm2
-; SSE2-NEXT: movaps [[MAGMASK4]](%rip), %xmm5
-; SSE2-NEXT: andps %xmm5, %xmm0
-; SSE2-NEXT: orps %xmm2, %xmm0
-; SSE2-NEXT: andps %xmm4, %xmm3
-; SSE2-NEXT: andps %xmm5, %xmm1
-; SSE2-NEXT: orps %xmm3, %xmm1
+; SSE2-NEXT: movaps [[MAGMASK4]](%rip), %xmm4
+; SSE2-NEXT: movaps %xmm4, %xmm5
+; SSE2-NEXT: andnps %xmm2, %xmm5
+; SSE2-NEXT: andps %xmm4, %xmm0
+; SSE2-NEXT: orps %xmm5, %xmm0
+; SSE2-NEXT: andps %xmm4, %xmm1
+; SSE2-NEXT: andnps %xmm3, %xmm4
+; SSE2-NEXT: orps %xmm4, %xmm1
; SSE2-NEXT: retq
;
; AVX-LABEL: v4f64:
Modified: llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll Tue Jan 22 05:44:49 2019
@@ -236,16 +236,16 @@ define i64 @test_bitreverse_i64(i64 %a)
define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: psllw $4, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pandn %xmm2, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
@@ -309,18 +309,17 @@ define <8 x i16> @test_bitreverse_v8i16(
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,5,4,7,6]
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: psllw $4, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pandn %xmm2, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
@@ -386,18 +385,17 @@ define <4 x i32> @test_bitreverse_v4i32(
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: psllw $4, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pandn %xmm2, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
@@ -465,18 +463,17 @@ define <2 x i64> @test_bitreverse_v2i64(
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: psllw $4, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pandn %xmm2, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
@@ -535,57 +532,59 @@ define <2 x i64> @test_bitreverse_v2i64(
define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $4, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm5, %xmm3
-; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pandn %xmm3, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: psllw $2, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE2-NEXT: pand %xmm8, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
-; SSE2-NEXT: pand %xmm9, %xmm0
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT: pand %xmm9, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm10, %xmm0
+; SSE2-NEXT: psrlw $2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: pand %xmm11, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
-; SSE2-NEXT: movdqa %xmm0, %xmm7
-; SSE2-NEXT: pand %xmm4, %xmm7
-; SSE2-NEXT: psrlw $1, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE2-NEXT: pand %xmm11, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
-; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: psrlw $1, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE2-NEXT: pand %xmm6, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
+; SSE2-NEXT: pand %xmm7, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
-; SSE2-NEXT: por %xmm7, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: pand %xmm2, %xmm7
-; SSE2-NEXT: psllw $4, %xmm7
-; SSE2-NEXT: pand %xmm5, %xmm7
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: pand %xmm1, %xmm5
+; SSE2-NEXT: psllw $4, %xmm5
+; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm5, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
-; SSE2-NEXT: pand %xmm8, %xmm3
-; SSE2-NEXT: pand %xmm9, %xmm1
-; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pand %xmm9, %xmm3
; SSE2-NEXT: pand %xmm10, %xmm1
+; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pand %xmm11, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: psrlw $1, %xmm4
-; SSE2-NEXT: pand %xmm11, %xmm4
-; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: retq
@@ -686,74 +685,76 @@ define <32 x i8> @test_bitreverse_v32i8(
define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i16:
; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $4, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm6, %xmm3
-; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: pandn %xmm3, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE2-NEXT: pand %xmm8, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
-; SSE2-NEXT: pand %xmm9, %xmm0
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT: pand %xmm9, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm10, %xmm0
+; SSE2-NEXT: psrlw $2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: pand %xmm11, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
-; SSE2-NEXT: movdqa %xmm0, %xmm7
-; SSE2-NEXT: pand %xmm5, %xmm7
-; SSE2-NEXT: psrlw $1, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE2-NEXT: pand %xmm11, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
-; SSE2-NEXT: pand %xmm12, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: psrlw $1, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
+; SSE2-NEXT: pand %xmm7, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
-; SSE2-NEXT: por %xmm7, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,3,2,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,4,7,6]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,7,6]
-; SSE2-NEXT: packuswb %xmm7, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm2, %xmm4
+; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,7,6]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6]
+; SSE2-NEXT: packuswb %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
-; SSE2-NEXT: pand %xmm6, %xmm4
-; SSE2-NEXT: pand %xmm6, %xmm1
-; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm4, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
-; SSE2-NEXT: pand %xmm8, %xmm3
-; SSE2-NEXT: pand %xmm9, %xmm1
-; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pand %xmm9, %xmm3
; SSE2-NEXT: pand %xmm10, %xmm1
+; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pand %xmm11, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm5
; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm11, %xmm5
-; SSE2-NEXT: pand %xmm12, %xmm1
+; SSE2-NEXT: pand %xmm12, %xmm5
+; SSE2-NEXT: pand %xmm7, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: retq
@@ -862,74 +863,76 @@ define <16 x i16> @test_bitreverse_v16i1
define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i32:
; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $4, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm6, %xmm3
-; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: pandn %xmm3, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE2-NEXT: pand %xmm8, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
-; SSE2-NEXT: pand %xmm9, %xmm0
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT: pand %xmm9, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm10, %xmm0
+; SSE2-NEXT: psrlw $2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: pand %xmm11, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
-; SSE2-NEXT: movdqa %xmm0, %xmm7
-; SSE2-NEXT: pand %xmm5, %xmm7
-; SSE2-NEXT: psrlw $1, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE2-NEXT: pand %xmm11, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
-; SSE2-NEXT: pand %xmm12, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: psrlw $1, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
+; SSE2-NEXT: pand %xmm7, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
-; SSE2-NEXT: por %xmm7, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm7, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm2, %xmm4
+; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: packuswb %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
-; SSE2-NEXT: pand %xmm6, %xmm4
-; SSE2-NEXT: pand %xmm6, %xmm1
-; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm4, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
-; SSE2-NEXT: pand %xmm8, %xmm3
-; SSE2-NEXT: pand %xmm9, %xmm1
-; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pand %xmm9, %xmm3
; SSE2-NEXT: pand %xmm10, %xmm1
+; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pand %xmm11, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm5
; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm11, %xmm5
-; SSE2-NEXT: pand %xmm12, %xmm1
+; SSE2-NEXT: pand %xmm12, %xmm5
+; SSE2-NEXT: pand %xmm7, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: retq
@@ -1038,78 +1041,80 @@ define <8 x i32> @test_bitreverse_v8i32(
define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v4i64:
; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $4, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm6, %xmm3
-; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: pandn %xmm3, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE2-NEXT: pand %xmm8, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
-; SSE2-NEXT: pand %xmm9, %xmm0
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT: pand %xmm9, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm10, %xmm0
+; SSE2-NEXT: psrlw $2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: pand %xmm11, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
-; SSE2-NEXT: movdqa %xmm0, %xmm7
-; SSE2-NEXT: pand %xmm5, %xmm7
-; SSE2-NEXT: psrlw $1, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE2-NEXT: pand %xmm11, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
-; SSE2-NEXT: pand %xmm12, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: psrlw $1, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
+; SSE2-NEXT: pand %xmm7, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
-; SSE2-NEXT: por %xmm7, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm7, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm2, %xmm4
+; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: packuswb %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
-; SSE2-NEXT: pand %xmm6, %xmm4
-; SSE2-NEXT: pand %xmm6, %xmm1
-; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm4, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
-; SSE2-NEXT: pand %xmm8, %xmm3
-; SSE2-NEXT: pand %xmm9, %xmm1
-; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pand %xmm9, %xmm3
; SSE2-NEXT: pand %xmm10, %xmm1
+; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pand %xmm11, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm5
; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm11, %xmm5
-; SSE2-NEXT: pand %xmm12, %xmm1
+; SSE2-NEXT: pand %xmm12, %xmm5
+; SSE2-NEXT: pand %xmm7, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: retq
@@ -1218,103 +1223,107 @@ define <4 x i64> @test_bitreverse_v4i64(
define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v64i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: movdqa %xmm3, %xmm14
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm13, %xmm5
+; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm7, %xmm5
-; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: pandn %xmm5, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; SSE2-NEXT: pand %xmm9, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm13, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: psllw $2, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
-; SSE2-NEXT: pand %xmm9, %xmm0
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm10, %xmm0
+; SSE2-NEXT: psrlw $2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; SSE2-NEXT: pand %xmm11, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm6, %xmm4
-; SSE2-NEXT: psrlw $1, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE2-NEXT: pand %xmm11, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
-; SSE2-NEXT: pand %xmm12, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm7
+; SSE2-NEXT: pand %xmm6, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE2-NEXT: pand %xmm12, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
+; SSE2-NEXT: pand %xmm13, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm13, %xmm4
-; SSE2-NEXT: psllw $4, %xmm4
-; SSE2-NEXT: pand %xmm7, %xmm4
-; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: pand %xmm3, %xmm7
+; SSE2-NEXT: psllw $4, %xmm7
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm7, %xmm4
+; SSE2-NEXT: pand %xmm9, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: pand %xmm13, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: psllw $2, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
-; SSE2-NEXT: pand %xmm9, %xmm1
-; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm10, %xmm1
+; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: pand %xmm11, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: psrlw $1, %xmm4
-; SSE2-NEXT: pand %xmm11, %xmm4
-; SSE2-NEXT: pand %xmm12, %xmm1
+; SSE2-NEXT: pand %xmm12, %xmm4
+; SSE2-NEXT: pand %xmm13, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pand %xmm13, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
-; SSE2-NEXT: pand %xmm7, %xmm4
-; SSE2-NEXT: pand %xmm7, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: pandn %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm9, %xmm2
; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm13, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: por %xmm7, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: psllw $2, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
-; SSE2-NEXT: pand %xmm9, %xmm2
-; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm10, %xmm2
+; SSE2-NEXT: psrlw $2, %xmm2
+; SSE2-NEXT: pand %xmm11, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: psrlw $1, %xmm4
-; SSE2-NEXT: pand %xmm11, %xmm4
-; SSE2-NEXT: pand %xmm12, %xmm2
+; SSE2-NEXT: pand %xmm12, %xmm4
+; SSE2-NEXT: pand %xmm13, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm13, %xmm4
+; SSE2-NEXT: movdqa %xmm14, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
-; SSE2-NEXT: pand %xmm7, %xmm4
-; SSE2-NEXT: pand %xmm7, %xmm3
-; SSE2-NEXT: psrlw $4, %xmm3
-; SSE2-NEXT: pand %xmm13, %xmm3
-; SSE2-NEXT: por %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm9, %xmm14
+; SSE2-NEXT: psrlw $4, %xmm14
+; SSE2-NEXT: pand %xmm3, %xmm14
+; SSE2-NEXT: pandn %xmm4, %xmm3
+; SSE2-NEXT: por %xmm14, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
-; SSE2-NEXT: pand %xmm9, %xmm3
-; SSE2-NEXT: psrlw $2, %xmm3
; SSE2-NEXT: pand %xmm10, %xmm3
+; SSE2-NEXT: psrlw $2, %xmm3
+; SSE2-NEXT: pand %xmm11, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: pand %xmm11, %xmm6
-; SSE2-NEXT: pand %xmm12, %xmm3
+; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: pand %xmm13, %xmm3
; SSE2-NEXT: paddb %xmm3, %xmm3
; SSE2-NEXT: por %xmm6, %xmm3
; SSE2-NEXT: retq
@@ -1481,25 +1490,27 @@ define <64 x i8> @test_bitreverse_v64i8(
define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i16:
; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm15
; SSE2-NEXT: pxor %xmm14, %xmm14
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,6]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
-; SSE2-NEXT: packuswb %xmm4, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm5
+; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm4, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: psrlw $4, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: pandn %xmm5, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm8, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
@@ -1531,74 +1542,76 @@ define <32 x i16> @test_bitreverse_v32i1
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,7,6]
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm6, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm5, %xmm6
-; SSE2-NEXT: psllw $2, %xmm6
-; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: psllw $2, %xmm4
+; SSE2-NEXT: pand %xmm9, %xmm4
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm11, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm7, %xmm6
-; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm12, %xmm4
; SSE2-NEXT: pand %xmm13, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,7,6]
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6]
-; SSE2-NEXT: packuswb %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: packuswb %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: psllw $4, %xmm4
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: pandn %xmm4, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pand %xmm5, %xmm6
-; SSE2-NEXT: psllw $2, %xmm6
-; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: psllw $2, %xmm4
+; SSE2-NEXT: pand %xmm9, %xmm4
; SSE2-NEXT: pand %xmm10, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm11, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pand %xmm7, %xmm6
-; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm12, %xmm4
; SSE2-NEXT: pand %xmm13, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7]
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm15, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,6]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm15[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,7,6]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,6]
-; SSE2-NEXT: packuswb %xmm6, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: packuswb %xmm4, %xmm6
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: psllw $4, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: psrlw $4, %xmm3
-; SSE2-NEXT: pand %xmm8, %xmm3
+; SSE2-NEXT: psrlw $4, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm6
+; SSE2-NEXT: pandn %xmm4, %xmm3
; SSE2-NEXT: por %xmm6, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
@@ -1795,25 +1808,27 @@ define <32 x i16> @test_bitreverse_v32i1
define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i32:
; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm15
; SSE2-NEXT: pxor %xmm14, %xmm14
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm4, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm5
+; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm4, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: psrlw $4, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: pandn %xmm5, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm8, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
@@ -1845,74 +1860,76 @@ define <16 x i32> @test_bitreverse_v16i3
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm6, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm5, %xmm6
-; SSE2-NEXT: psllw $2, %xmm6
-; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: psllw $2, %xmm4
+; SSE2-NEXT: pand %xmm9, %xmm4
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm11, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm7, %xmm6
-; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm12, %xmm4
; SSE2-NEXT: pand %xmm13, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: packuswb %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: psllw $4, %xmm4
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: pandn %xmm4, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pand %xmm5, %xmm6
-; SSE2-NEXT: psllw $2, %xmm6
-; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: psllw $2, %xmm4
+; SSE2-NEXT: pand %xmm9, %xmm4
; SSE2-NEXT: pand %xmm10, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm11, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pand %xmm7, %xmm6
-; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm12, %xmm4
; SSE2-NEXT: pand %xmm13, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm15, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm15[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm6, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: packuswb %xmm4, %xmm6
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: psllw $4, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: psrlw $4, %xmm3
-; SSE2-NEXT: pand %xmm8, %xmm3
+; SSE2-NEXT: psrlw $4, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm6
+; SSE2-NEXT: pandn %xmm4, %xmm3
; SSE2-NEXT: por %xmm6, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
@@ -2115,27 +2132,29 @@ define <16 x i32> @test_bitreverse_v16i3
define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i64:
; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm15
; SSE2-NEXT: pxor %xmm14, %xmm14
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm4, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm5
+; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; SSE2-NEXT: pand %xmm4, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: psrlw $4, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: pandn %xmm5, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm8, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
@@ -2169,78 +2188,80 @@ define <8 x i64> @test_bitreverse_v8i64(
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm6, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm5, %xmm6
-; SSE2-NEXT: psllw $2, %xmm6
-; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: psllw $2, %xmm4
+; SSE2-NEXT: pand %xmm9, %xmm4
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm11, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm7, %xmm6
-; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm12, %xmm4
; SSE2-NEXT: pand %xmm13, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: packuswb %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: psllw $4, %xmm4
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: pandn %xmm4, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: psrlw $4, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pand %xmm5, %xmm6
-; SSE2-NEXT: psllw $2, %xmm6
-; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: psllw $2, %xmm4
+; SSE2-NEXT: pand %xmm9, %xmm4
; SSE2-NEXT: pand %xmm10, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm11, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pand %xmm7, %xmm6
-; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm12, %xmm4
; SSE2-NEXT: pand %xmm13, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm15, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm15[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm6, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: packuswb %xmm4, %xmm6
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: psllw $4, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: psrlw $4, %xmm3
-; SSE2-NEXT: pand %xmm8, %xmm3
+; SSE2-NEXT: psrlw $4, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm6
+; SSE2-NEXT: pandn %xmm4, %xmm3
; SSE2-NEXT: por %xmm6, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
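
[Editorial note, not part of the patch] The SSE2 bitreverse diffs above now keep a single splat constant per nibble step and feed it to both pand and pandn, where the old code materialized two constants (the [15,15,...] and [240,240,...] splats). Below is a minimal, illustrative C++ sketch of the byte-wise identity this relies on; it is not code from this change, just a scalar check that masking with ~C can be written as an AND-NOT against the same C the other half already uses.

// Illustrative sketch only -- not part of the patch.
#include <cassert>
#include <cstdint>

int main() {
  const uint8_t C = 0x0F; // the [15,15,...] splat seen in the CHECK lines
  for (int v = 0; v < 256; ++v) {
    uint8_t y = static_cast<uint8_t>(v);
    // pand  %C, y  -> y & C
    // pandn y, %C  -> (~C) & y, which equals y & 0xF0 here, so a separate
    //                 [240,240,...] constant is no longer required
    assert(static_cast<uint8_t>(~C & y) == static_cast<uint8_t>(y & 0xF0));
  }
  return 0;
}

The same holds with the roles reversed, which is why some of the AVX hunks further down keep the [240,240,...] splat instead and apply vpandn to the shifted-right half.
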
Modified: llvm/trunk/test/CodeGen/X86/vector-fshl-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-fshl-256.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-fshl-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-fshl-256.ll Tue Jan 22 05:44:49 2019
@@ -2556,10 +2556,8 @@ define <32 x i8> @splatconstant_funnnel_
; XOPAVX2-LABEL: splatconstant_funnnel_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
-; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vpsllw $4, %ymm0, %ymm0
-; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; XOPAVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpcmov {{.*}}(%rip), %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
ret <32 x i8> %res
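
[Editorial note, not part of the patch] In the XOPAVX2 checks just above, the two vpand ops and the vpor collapse into one vpcmov with the mask taken from memory; vpcmov is a per-bit select, so the whole constant bit-select pattern fits in a single instruction. The C++ sketch below only verifies the underlying identity; the helper name bitSelect is mine, and no claim is made about the exact operand order the instruction encodes.

// Illustrative sketch only -- not part of the patch.
#include <cassert>
#include <cstdint>

// Per-bit select: result bits come from x where the mask bit is 1 and
// from y where it is 0, i.e. (x & C) | (y & ~C).
constexpr uint8_t bitSelect(uint8_t x, uint8_t y, uint8_t c) {
  return static_cast<uint8_t>((x & c) | (y & static_cast<uint8_t>(~c)));
}

int main() {
  // Exhaustive per-bit check that the select really follows the mask.
  for (int m = 0; m < 256; ++m) {
    uint8_t c = static_cast<uint8_t>(m);
    assert(bitSelect(0xFF, 0x00, c) == c);
  }
  // With C = 0xF0 this is the shape of the funnel-shift-by-4 pattern above:
  // keep the high nibble of the shifted-left value and the low nibble of
  // the shifted-right value in one operation.
  assert(bitSelect(0xA0, 0x0D, 0xF0) == 0xAD);
  return 0;
}
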
Modified: llvm/trunk/test/CodeGen/X86/vector-fshl-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-fshl-512.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-fshl-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-fshl-512.ll Tue Jan 22 05:44:49 2019
@@ -1504,32 +1504,30 @@ define <64 x i8> @splatconstant_funnnel_
; AVX512F-LABEL: splatconstant_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm2, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm3, %ymm2
-; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm3, %ymm2
-; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpand %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
Modified: llvm/trunk/test/CodeGen/X86/vector-fshl-rot-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-fshl-rot-256.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-fshl-rot-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-fshl-rot-256.ll Tue Jan 22 05:44:49 2019
@@ -341,47 +341,45 @@ define <32 x i8> @var_funnnel_v32i8(<32
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm7
+; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm7
; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
@@ -1475,16 +1473,15 @@ define <32 x i8> @splatconstant_funnnel_
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vector-fshl-rot-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-fshl-rot-512.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-fshl-rot-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-fshl-rot-512.ll Tue Jan 22 05:44:49 2019
@@ -111,49 +111,47 @@ define <64 x i8> @var_funnnel_v64i8(<64
; AVX512F-LABEL: var_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
-; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm9
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512F-NEXT: vpand %ymm10, %ymm9, %ymm9
-; AVX512F-NEXT: vpor %ymm4, %ymm9, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT: vpandn %ymm4, %ymm7, %ymm4
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm8
+; AVX512F-NEXT: vpand %ymm7, %ymm8, %ymm8
+; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
-; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm11
-; AVX512F-NEXT: vpor %ymm4, %ymm11, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm9
+; AVX512F-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm7, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4
-; AVX512F-NEXT: vpand %ymm10, %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -163,49 +161,47 @@ define <64 x i8> @var_funnnel_v64i8(<64
; AVX512VL-LABEL: var_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
-; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm9
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512VL-NEXT: vpand %ymm10, %ymm9, %ymm9
-; AVX512VL-NEXT: vpor %ymm4, %ymm9, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT: vpandn %ymm4, %ymm7, %ymm4
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm8
+; AVX512VL-NEXT: vpand %ymm7, %ymm8, %ymm8
+; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
-; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm11
-; AVX512VL-NEXT: vpor %ymm4, %ymm11, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm9
+; AVX512VL-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm7, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4
-; AVX512VL-NEXT: vpand %ymm10, %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -776,32 +772,30 @@ define <64 x i8> @splatconstant_funnnel_
; AVX512F-LABEL: splatconstant_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
Modified: llvm/trunk/test/CodeGen/X86/vector-fshr-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-fshr-256.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-fshr-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-fshr-256.ll Tue Jan 22 05:44:49 2019
@@ -2569,10 +2569,8 @@ define <32 x i8> @splatconstant_funnnel_
; XOPAVX2-LABEL: splatconstant_funnnel_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
-; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vpsllw $4, %ymm0, %ymm0
-; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; XOPAVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpcmov {{.*}}(%rip), %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
ret <32 x i8> %res
Modified: llvm/trunk/test/CodeGen/X86/vector-fshr-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-fshr-512.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-fshr-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-fshr-512.ll Tue Jan 22 05:44:49 2019
@@ -1496,32 +1496,30 @@ define <64 x i8> @splatconstant_funnnel_
; AVX512F-LABEL: splatconstant_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm2, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm3, %ymm2
-; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm3, %ymm2
-; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpand %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
Modified: llvm/trunk/test/CodeGen/X86/vector-fshr-rot-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-fshr-rot-256.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-fshr-rot-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-fshr-rot-256.ll Tue Jan 22 05:44:49 2019
@@ -372,50 +372,48 @@ define <32 x i8> @var_funnnel_v32i8(<32
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpxor %xmm10, %xmm10, %xmm10
-; AVX1-NEXT: vpsubb %xmm5, %xmm10, %xmm5
+; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
+; AVX1-NEXT: vpsubb %xmm5, %xmm8, %xmm5
; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm11 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX1-NEXT: vpand %xmm11, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $2, %xmm2, %xmm6
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT: vpandn %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm6
; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpor %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm4
-; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3
+; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm6
+; AVX1-NEXT: vpor %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpsubb %xmm1, %xmm10, %xmm1
+; AVX1-NEXT: vpsubb %xmm1, %xmm8, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm11, %xmm3, %xmm3
+; AVX1-NEXT: vpandn %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
@@ -1552,16 +1550,15 @@ define <32 x i8> @splatconstant_funnnel_
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vector-fshr-rot-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-fshr-rot-512.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-fshr-rot-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-fshr-rot-512.ll Tue Jan 22 05:44:49 2019
@@ -117,52 +117,50 @@ define <64 x i8> @var_funnnel_v64i8(<64
; AVX512F-LABEL: var_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512F-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512F-NEXT: vpsubb %ymm2, %ymm6, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
-; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm10
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512F-NEXT: vpand %ymm11, %ymm10, %ymm10
-; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT: vpandn %ymm4, %ymm8, %ymm4
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm9
+; AVX512F-NEXT: vpand %ymm8, %ymm9, %ymm9
+; AVX512F-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpand %ymm10, %ymm4, %ymm4
-; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm12
-; AVX512F-NEXT: vpor %ymm4, %ymm12, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
+; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsubb %ymm3, %ymm6, %ymm3
-; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
+; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm8, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4
-; AVX512F-NEXT: vpand %ymm11, %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm10, %ymm2, %ymm2
+; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -172,52 +170,50 @@ define <64 x i8> @var_funnnel_v64i8(<64
; AVX512VL-LABEL: var_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512VL-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512VL-NEXT: vpsubb %ymm2, %ymm6, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
-; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm10
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512VL-NEXT: vpand %ymm11, %ymm10, %ymm10
-; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT: vpandn %ymm4, %ymm8, %ymm4
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm9
+; AVX512VL-NEXT: vpand %ymm8, %ymm9, %ymm9
+; AVX512VL-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm10 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT: vpand %ymm10, %ymm4, %ymm4
-; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm12
-; AVX512VL-NEXT: vpor %ymm4, %ymm12, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
+; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsubb %ymm3, %ymm6, %ymm3
-; AVX512VL-NEXT: vpand %ymm8, %ymm3, %ymm3
+; AVX512VL-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm8, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4
-; AVX512VL-NEXT: vpand %ymm11, %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm10, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -796,32 +792,30 @@ define <64 x i8> @splatconstant_funnnel_
; AVX512F-LABEL: splatconstant_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll Tue Jan 22 05:44:49 2019
@@ -337,47 +337,45 @@ define <32 x i8> @var_rotate_v32i8(<32 x
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm7
+; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm7
; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
@@ -1476,16 +1474,15 @@ define <32 x i8> @splatconstant_rotate_v
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -1760,16 +1757,15 @@ define <32 x i8> @splatconstant_rotate_m
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll?rev=351819&r1=351818&r2=351819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll Tue Jan 22 05:44:49 2019
@@ -109,46 +109,44 @@ define <64 x i8> @var_rotate_v64i8(<64 x
; AVX512F-LABEL: var_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm8
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512F-NEXT: vpand %ymm9, %ymm8, %ymm8
-; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT: vpandn %ymm4, %ymm6, %ymm4
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm7
+; AVX512F-NEXT: vpand %ymm6, %ymm7, %ymm7
+; AVX512F-NEXT: vpor %ymm4, %ymm7, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
-; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
-; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm8
+; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm6, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4
-; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -158,46 +156,44 @@ define <64 x i8> @var_rotate_v64i8(<64 x
; AVX512VL-LABEL: var_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
-; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm8
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512VL-NEXT: vpand %ymm9, %ymm8, %ymm8
-; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT: vpandn %ymm4, %ymm6, %ymm4
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm7
+; AVX512VL-NEXT: vpand %ymm6, %ymm7, %ymm7
+; AVX512VL-NEXT: vpor %ymm4, %ymm7, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
-; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
-; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm8
+; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm6, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4
-; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -767,32 +763,30 @@ define <64 x i8> @splatconstant_rotate_v
; AVX512F-LABEL: splatconstant_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
@@ -907,38 +901,36 @@ define <64 x i8> @splatconstant_rotate_m
; AVX512F-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm5
-; AVX512F-NEXT: vpand %ymm3, %ymm5, %ymm3
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm4
+; AVX512F-NEXT: vpandn %ymm4, %ymm3, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpor %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
; AVX512VL-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm5
-; AVX512VL-NEXT: vpand %ymm3, %ymm5, %ymm3
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm4
+; AVX512VL-NEXT: vpandn %ymm4, %ymm3, %ymm4
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT: vpor %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;