[llvm] ff37b11 - [LegalizeVectorOps][X86] Don't defer BITREVERSE expansion to LegalizeDAG.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 21 15:23:28 PDT 2021
Author: Craig Topper
Date: 2021-10-21T15:23:23-07:00
New Revision: ff37b1105d6eb7b9a4e4515c2ced95e9837567aa
URL: https://github.com/llvm/llvm-project/commit/ff37b1105d6eb7b9a4e4515c2ced95e9837567aa
DIFF: https://github.com/llvm/llvm-project/commit/ff37b1105d6eb7b9a4e4515c2ced95e9837567aa.diff
LOG: [LegalizeVectorOps][X86] Don't defer BITREVERSE expansion to LegalizeDAG.
Expanding early allows the shifts to be custom lowered in
LegalizeVectorOps. A DAG combine is then able to run on them before
LegalizeDAG handles the BUILD_VECTORs used for the masks.
v16i8 shift lowering on X86 requires a mask to be applied to a v8i16
shift. The BITREVERSE expansion applied an AND mask before SHL ops and
after SRL ops; this was done so the same mask constant could be shared by
both shifts.
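As a minimal scalar sketch (illustrative C++, not the LLVM code itself),
the shared-mask expansion reverses each byte like this, ANDing with the
same constant before the SHL and after the SRL of each step:

  #include <cstdint>
  #include <cstdio>

  // Scalar model of the shared-mask BITREVERSE expansion. Each step ANDs
  // with the same constant before the SHL and after the SRL, so a single
  // mask constant serves both shifts of that step.
  static uint8_t reverseByte(uint8_t X) {
    X = ((X >> 4) & 0x0F) | ((X & 0x0F) << 4); // swap nibbles
    X = ((X >> 2) & 0x33) | ((X & 0x33) << 2); // swap 2-bit pairs
    X = ((X >> 1) & 0x55) | ((X & 0x55) << 1); // swap adjacent bits
    return X;
  }

  int main() {
    printf("%02x\n", (unsigned)reverseByte(0x01)); // prints 80
    return 0;
  }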
It looks like this patch allows DAG combine to remove the AND mask that
X86 lowering adds after the v16i8 SHL. This maintains the mask sharing
that the BITREVERSE expansion was trying to achieve. Prior to this patch
it looks like we kept the mask after the SHL instead, which required an
extra constant pool entry or a PANDN to invert it.
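For contrast, a hypothetical scalar model of one step of that pre-patch
shape, where the AND lands after the SHL and therefore needs the
complementary constant:

  #include <cstdint>

  // Pre-patch shape (illustrative): masking after the SHL needs ~0x0F,
  // costing an extra constant pool entry or a PANDN to invert 0x0F.
  static uint8_t swapNibblesKeptMask(uint8_t X) {
    return (uint8_t)(((X >> 4) & 0x0F) | ((X << 4) & ~0x0F));
  }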
This is dependent on D112248 because RISCV will end up scalarizing the BSWAP
portion of the BITREVERSE expansion if we don't disable BSWAP scalarization in
LegalizeVectorOps first.
Reviewed By: RKSimon
Differential Revision: https://reviews.llvm.org/D112254
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
llvm/test/CodeGen/X86/bitreverse.ll
llvm/test/CodeGen/X86/combine-bitreverse.ll
llvm/test/CodeGen/X86/vector-bitreverse.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 21ed46402c4c..ec38720f5eea 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1162,9 +1162,10 @@ void VectorLegalizer::ExpandBITREVERSE(SDNode *Node,
if (TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
TLI.isOperationLegalOrCustom(ISD::SRL, VT) &&
TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT) &&
- TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT))
- // Let LegalizeDAG handle this later.
+ TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT)) {
+ Results.push_back(TLI.expandBITREVERSE(Node, DAG));
return;
+ }
// Otherwise unroll.
SDValue Tmp = DAG.UnrollVectorOp(Node);
diff --git a/llvm/test/CodeGen/X86/bitreverse.ll b/llvm/test/CodeGen/X86/bitreverse.ll
index bde6329e8e3c..8433c7583d13 100644
--- a/llvm/test/CodeGen/X86/bitreverse.ll
+++ b/llvm/test/CodeGen/X86/bitreverse.ll
@@ -58,10 +58,11 @@ define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
; X64-NEXT: psllw $8, %xmm0
; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, %xmm1
-; X64-NEXT: psllw $4, %xmm1
-; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; X64-NEXT: psrlw $4, %xmm0
-; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: psrlw $4, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X64-NEXT: pand %xmm2, %xmm1
+; X64-NEXT: pand %xmm2, %xmm0
+; X64-NEXT: psllw $4, %xmm0
; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrlw $2, %xmm1
diff --git a/llvm/test/CodeGen/X86/combine-bitreverse.ll b/llvm/test/CodeGen/X86/combine-bitreverse.ll
index 4a50f7c879ad..3c359a5efe79 100644
--- a/llvm/test/CodeGen/X86/combine-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/combine-bitreverse.ll
@@ -50,10 +50,11 @@ define <4 x i32> @test_demandedbits_bitreverse(<4 x i32> %a0) nounwind {
; X86-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; X86-NEXT: packuswb %xmm2, %xmm0
; X86-NEXT: movdqa %xmm0, %xmm1
-; X86-NEXT: psllw $4, %xmm1
-; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-NEXT: psrlw $4, %xmm0
-; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: psrlw $4, %xmm1
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-NEXT: pand %xmm2, %xmm1
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: psllw $4, %xmm0
; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: psrlw $2, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll
index 9439cdf4773c..a9b872c0254b 100644
--- a/llvm/test/CodeGen/X86/vector-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll
@@ -668,10 +668,11 @@ define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psllw $4, %xmm1
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
@@ -758,10 +759,11 @@ define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
; SSE2-NEXT: psllw $8, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psllw $4, %xmm1
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
@@ -860,10 +862,11 @@ define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psllw $4, %xmm1
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
@@ -964,10 +967,11 @@ define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psllw $4, %xmm1
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
@@ -1056,15 +1060,13 @@ define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psllw $4, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pandn %xmm3, %xmm4
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psrlw $2, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
@@ -1079,12 +1081,12 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm5, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: psrlw $4, %xmm5
+; SSE2-NEXT: pand %xmm2, %xmm5
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
+; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm2
@@ -1229,19 +1231,17 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: psllw $8, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psllw $4, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pandn %xmm3, %xmm4
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psrlw $2, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
@@ -1256,16 +1256,16 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: psrlw $8, %xmm5
-; SSE2-NEXT: psllw $8, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm5, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: psllw $8, %xmm1
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: psrlw $4, %xmm5
+; SSE2-NEXT: pand %xmm2, %xmm5
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
+; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm2
@@ -1427,24 +1427,22 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psllw $4, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: pandn %xmm4, %xmm5
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: psrlw $2, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
@@ -1459,19 +1457,19 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psllw $4, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: packuswb %xmm6, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
@@ -1634,26 +1632,24 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v4i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psllw $4, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: pandn %xmm4, %xmm5
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: psrlw $2, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
@@ -1668,21 +1664,21 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psllw $4, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: packuswb %xmm6, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
@@ -1845,20 +1841,18 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v64i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pandn %xmm5, %xmm6
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: psrlw $2, %xmm6
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: psllw $2, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm7
@@ -1869,53 +1863,51 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: psllw $4, %xmm7
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pandn %xmm7, %xmm5
-; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlw $2, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm1
+; SSE2-NEXT: psrlw $4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: psrlw $2, %xmm7
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm6, %xmm5
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: pand %xmm6, %xmm7
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: pandn %xmm5, %xmm7
-; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: psrlw $4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: psllw $4, %xmm2
; SSE2-NEXT: por %xmm7, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psrlw $2, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: psrlw $2, %xmm7
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: psllw $2, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm6, %xmm5
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: pand %xmm6, %xmm7
; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: psrlw $4, %xmm4
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm5, %xmm3
-; SSE2-NEXT: por %xmm4, %xmm3
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: psrlw $4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: psllw $4, %xmm3
+; SSE2-NEXT: por %xmm7, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlw $2, %xmm4
-; SSE2-NEXT: pand %xmm8, %xmm4
-; SSE2-NEXT: pand %xmm8, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
@@ -2133,24 +2125,22 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrlw $8, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrlw $8, %xmm4
; SSE2-NEXT: psllw $8, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pandn %xmm5, %xmm6
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: psrlw $2, %xmm6
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: psllw $2, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm7
@@ -2165,61 +2155,59 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; SSE2-NEXT: psllw $8, %xmm1
; SSE2-NEXT: por %xmm7, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: psllw $4, %xmm7
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pandn %xmm7, %xmm5
-; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlw $2, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm1
+; SSE2-NEXT: psrlw $4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: psrlw $2, %xmm7
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm6, %xmm5
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: pand %xmm6, %xmm7
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psrlw $8, %xmm5
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: psrlw $8, %xmm7
; SSE2-NEXT: psllw $8, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: pandn %xmm5, %xmm7
-; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: por %xmm7, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psrlw $2, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: psrlw $4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: psllw $4, %xmm2
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: psrlw $2, %xmm7
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: psllw $2, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm6, %xmm5
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: psrlw $1, %xmm7
+; SSE2-NEXT: pand %xmm6, %xmm7
; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: psrlw $8, %xmm5
-; SSE2-NEXT: psllw $8, %xmm4
-; SSE2-NEXT: por %xmm5, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: psrlw $4, %xmm4
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm5, %xmm3
-; SSE2-NEXT: por %xmm4, %xmm3
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: psrlw $8, %xmm7
+; SSE2-NEXT: psllw $8, %xmm3
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: psrlw $4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: psllw $4, %xmm3
+; SSE2-NEXT: por %xmm7, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlw $2, %xmm4
-; SSE2-NEXT: pand %xmm8, %xmm4
-; SSE2-NEXT: pand %xmm8, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
@@ -2472,110 +2460,106 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm8[8],xmm3[9],xmm8[9],xmm3[10],xmm8[10],xmm3[11],xmm8[11],xmm3[12],xmm8[12],xmm3[13],xmm8[13],xmm3[14],xmm8[14],xmm3[15],xmm8[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: pandn %xmm6, %xmm7
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: psrlw $2, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
-; SSE2-NEXT: pand %xmm9, %xmm7
-; SSE2-NEXT: pand %xmm9, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE2-NEXT: pand %xmm6, %xmm7
+; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: psllw $2, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
-; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: pand %xmm7, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pandn %xmm5, %xmm6
-; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlw $2, %xmm5
-; SSE2-NEXT: pand %xmm9, %xmm5
-; SSE2-NEXT: pand %xmm9, %xmm1
+; SSE2-NEXT: packuswb %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrlw $4, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrlw $2, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pandn %xmm5, %xmm6
-; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psrlw $2, %xmm5
-; SSE2-NEXT: pand %xmm9, %xmm5
-; SSE2-NEXT: pand %xmm9, %xmm2
+; SSE2-NEXT: packuswb %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrlw $4, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: psllw $4, %xmm2
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrlw $2, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: psllw $2, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm5, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: packuswb %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlw $4, %xmm4
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm5, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: psllw $4, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlw $2, %xmm4
-; SSE2-NEXT: pand %xmm9, %xmm4
-; SSE2-NEXT: pand %xmm9, %xmm3
+; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
@@ -2828,118 +2812,114 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm8[8],xmm3[9],xmm8[9],xmm3[10],xmm8[10],xmm3[11],xmm8[11],xmm3[12],xmm8[12],xmm3[13],xmm8[13],xmm3[14],xmm8[14],xmm3[15],xmm8[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: psllw $4, %xmm6
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: pandn %xmm6, %xmm7
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: psrlw $4, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: psrlw $2, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
-; SSE2-NEXT: pand %xmm9, %xmm7
-; SSE2-NEXT: pand %xmm9, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE2-NEXT: pand %xmm6, %xmm7
+; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: psllw $2, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
-; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: pand %xmm7, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pandn %xmm5, %xmm6
-; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlw $2, %xmm5
-; SSE2-NEXT: pand %xmm9, %xmm5
-; SSE2-NEXT: pand %xmm9, %xmm1
+; SSE2-NEXT: packuswb %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrlw $4, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrlw $2, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pandn %xmm5, %xmm6
-; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psrlw $2, %xmm5
-; SSE2-NEXT: pand %xmm9, %xmm5
-; SSE2-NEXT: pand %xmm9, %xmm2
+; SSE2-NEXT: packuswb %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrlw $4, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: psllw $4, %xmm2
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrlw $2, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: psllw $2, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrlw $1, %xmm4
+; SSE2-NEXT: pand %xmm7, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: packuswb %xmm5, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; SSE2-NEXT: packuswb %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlw $4, %xmm4
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm5, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: psllw $4, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlw $2, %xmm4
-; SSE2-NEXT: pand %xmm9, %xmm4
-; SSE2-NEXT: pand %xmm9, %xmm3
+; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4