[llvm] r289837 - [x86] use a single shufps when it can save instructions
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 15 10:03:39 PST 2016
Author: spatel
Date: Thu Dec 15 12:03:38 2016
New Revision: 289837
URL: http://llvm.org/viewvc/llvm-project?rev=289837&view=rev
Log:
[x86] use a single shufps when it can save instructions
This is a tiny patch with a big pile of test changes.
This partially fixes PR27885:
https://llvm.org/bugs/show_bug.cgi?id=27885
My motivating case looks like this:
- vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
- vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
- vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+ vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
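For reference, the new output corresponds to the shuffle mask <0,2,4,6>: the two low
result lanes come from the first source and the two high lanes from the second, which
is exactly the form a single SHUFPS can produce (imm8 = 0x88). A minimal scalar model
of the instruction, purely illustrative and not code from this patch:

  // Illustrative scalar model of SHUFPS dst, src, imm8 (not part of the patch):
  // the two low result lanes always select from dst, the two high lanes from src.
  void shufps_model(float dst[4], const float src[4], unsigned imm8) {
    float r0 = dst[(imm8 >> 0) & 3];
    float r1 = dst[(imm8 >> 2) & 3];
    float r2 = src[(imm8 >> 4) & 3];
    float r3 = src[(imm8 >> 6) & 3];
    dst[0] = r0; dst[1] = r1; dst[2] = r2; dst[3] = r3;
  }
  // Mask <0,2,4,6> -> imm8 = 0x88, i.e. xmm0 = xmm0[0,2],xmm1[0,2] as shown above.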
And this happens several times in the diffs. For chips with domain-crossing penalties,
the reduction in instruction count and code size should usually outweigh the potential
penalty of using an FP op in a sequence of int ops. For chips such as recent Intel big
cores and Atom, there is no domain-crossing penalty for shufps, so using shufps is a
pure win.
So the test case diffs all appear to be improvements, except for one test in
vector-shuffle-combining.ll, where we miss an opportunity to use a shift to generate
zero elements, and one test in combine-sra.ll, where multiple uses prevent the expected
shuffle combining.
Differential Revision: https://reviews.llvm.org/D27692
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll
llvm/trunk/test/CodeGen/X86/avx-trunc.ll
llvm/trunk/test/CodeGen/X86/combine-or.ll
llvm/trunk/test/CodeGen/X86/combine-shl.ll
llvm/trunk/test/CodeGen/X86/combine-sra.ll
llvm/trunk/test/CodeGen/X86/combine-srl.ll
llvm/trunk/test/CodeGen/X86/compress_expand.ll
llvm/trunk/test/CodeGen/X86/i64-to-float.ll
llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll
llvm/trunk/test/CodeGen/X86/masked_memop.ll
llvm/trunk/test/CodeGen/X86/oddshuffles.ll
llvm/trunk/test/CodeGen/X86/palignr.ll
llvm/trunk/test/CodeGen/X86/pmul.ll
llvm/trunk/test/CodeGen/X86/reduce-trunc-shl.ll
llvm/trunk/test/CodeGen/X86/sse-fsignum.ll
llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll
llvm/trunk/test/CodeGen/X86/vector-compare-results.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll
llvm/trunk/test/CodeGen/X86/vector-trunc.ll
llvm/trunk/test/CodeGen/X86/vsplit-and.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Dec 15 12:03:38 2016
@@ -10119,26 +10119,31 @@ static SDValue lowerV4I32VectorShuffle(c
DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
- // If we have direct support for blends, we should lower by decomposing into
- // a permute. That will be faster than the domain cross.
- if (IsBlendSupported)
- return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
- Mask, DAG);
+ // Assume that a single SHUFPS is faster than an alternative sequence of
+ // multiple instructions (even if the CPU has a domain penalty).
+ // If some CPU is harmed by the domain switch, we can fix it in a later pass.
+ if (!isSingleSHUFPSMask(Mask)) {
+ // If we have direct support for blends, we should lower by decomposing into
+ // a permute. That will be faster than the domain cross.
+ if (IsBlendSupported)
+ return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
+ Mask, DAG);
- // Try to lower by permuting the inputs into an unpack instruction.
- if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1,
- V2, Mask, DAG))
- return Unpack;
+ // Try to lower by permuting the inputs into an unpack instruction.
+ if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
+ DL, MVT::v4i32, V1, V2, Mask, DAG))
+ return Unpack;
+ }
// We implement this with SHUFPS because it can blend from two vectors.
// Because we're going to eventually use SHUFPS, we use SHUFPS even to build
// up the inputs, bypassing domain shift penalties that we would encur if we
// directly used PSHUFD on Nehalem and older. For newer chips, this isn't
// relevant.
- return DAG.getBitcast(
- MVT::v4i32,
- DAG.getVectorShuffle(MVT::v4f32, DL, DAG.getBitcast(MVT::v4f32, V1),
- DAG.getBitcast(MVT::v4f32, V2), Mask));
+ SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
+ SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
+ SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
+ return DAG.getBitcast(MVT::v4i32, ShufPS);
}
/// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
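The hunk above gates the blend and unpack lowerings on isSingleSHUFPSMask(Mask), whose
body is not part of this hunk. As a rough sketch (an assumption about what the helper
checks, not a copy of it): a 4-element mask fits a single SHUFPS when each half of the
result reads from only one of the two source vectors, since SHUFPS fills its low half
from one operand and its high half from the other.

  #include <cassert>
  #include "llvm/ADT/ArrayRef.h"

  // Sketch only: approximates the isSingleSHUFPSMask(Mask) guard used above.
  static bool isSingleSHUFPSMaskSketch(llvm::ArrayRef<int> Mask) {
    assert(Mask.size() == 4 && "expected a v4 shuffle mask");
    // Result lanes 0-1 must not mix the two sources; undef lanes (-1) are fine.
    if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
      return false;
    // Likewise for result lanes 2-3.
    if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
      return false;
    return true;
  }

Masks that fail this test keep taking the existing blend/unpack paths, so the change
only affects shuffles that genuinely collapse to a single instruction.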
Modified: llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll (original)
+++ llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll Thu Dec 15 12:03:38 2016
@@ -19,12 +19,11 @@ define void @pull_bitcast(<4 x i8>* %pA,
define <4 x i32> @multi_use_swizzle(<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK-LABEL: multi_use_swizzle:
; CHECK: # BB#0:
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,1,1,2]
-; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = mem[1,1,2,3]
-; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,2,2]
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,2]
-; CHECK-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vmovaps (%rdi), %xmm0
+; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],mem[1,2]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,3,2,2]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,2]
+; CHECK-NEXT: vxorps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%A = load <4 x i32>, <4 x i32>* %pA
%B = load <4 x i32>, <4 x i32>* %pB
Modified: llvm/trunk/test/CodeGen/X86/avx-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-trunc.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-trunc.ll Thu Dec 15 12:03:38 2016
@@ -5,9 +5,7 @@ define <4 x i32> @trunc_64_32(<4 x i64>
; CHECK-LABEL: trunc_64_32:
; CHECK: # BB#0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
-; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%B = trunc <4 x i64> %A to <4 x i32>
Modified: llvm/trunk/test/CodeGen/X86/combine-or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-or.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-or.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-or.ll Thu Dec 15 12:03:38 2016
@@ -170,8 +170,7 @@ define <4 x i32> @test12(<4 x i32> %a, <
define <4 x i32> @test13(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test13:
; CHECK: # BB#0:
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[2,3]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 1, i32 1, i32 4, i32 4>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -195,9 +194,8 @@ define <2 x i64> @test14(<2 x i64> %a, <
define <4 x i32> @test15(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test15:
; CHECK: # BB#0:
-; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,2,1]
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,1,2,3]
-; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,1],xmm0[2,1]
+; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 1>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 2, i32 1, i32 4, i32 4>
Modified: llvm/trunk/test/CodeGen/X86/combine-shl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-shl.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-shl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-shl.ll Thu Dec 15 12:03:38 2016
@@ -107,10 +107,8 @@ define <4 x i32> @combine_vec_shl_known_
define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_shl_trunc_and:
; SSE: # BB#0:
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
Modified: llvm/trunk/test/CodeGen/X86/combine-sra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sra.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sra.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sra.ll Thu Dec 15 12:03:38 2016
@@ -161,15 +161,13 @@ define <4 x i32> @combine_vec_ashr_ashr2
define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_ashr_trunc_and:
; SSE: # BB#0:
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE-NEXT: andps {{.*}}(%rip), %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrad %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrad %xmm2, %xmm4
@@ -203,19 +201,18 @@ define <4 x i32> @combine_vec_ashr_trunc
define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_lshr:
; SSE: # BB#0:
-; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: psrlq $32, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: psrad $2, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: psrad $3, %xmm2
-; SSE-NEXT: psrad $1, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: movaps %xmm0, %xmm1
+; SSE-NEXT: psrad $2, %xmm1
+; SSE-NEXT: blendpd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE-NEXT: psrad $3, %xmm0
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_trunc_lshr:
@@ -237,17 +234,19 @@ define <4 x i32> @combine_vec_ashr_trunc
define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_ashr:
; SSE: # BB#0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,0,2]
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrad $2, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: psrad $3, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE-NEXT: psrad $31, %xmm1
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,2]
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: movaps %xmm0, %xmm1
+; SSE-NEXT: psrad $2, %xmm1
+; SSE-NEXT: blendpd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE-NEXT: psrad $3, %xmm0
; SSE-NEXT: psrad $1, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_trunc_ashr:
Modified: llvm/trunk/test/CodeGen/X86/combine-srl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-srl.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-srl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-srl.ll Thu Dec 15 12:03:38 2016
@@ -223,11 +223,9 @@ define <4 x i32> @combine_vec_lshr_lshr_
define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr0:
; SSE: # BB#0:
-; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: psrlq $32, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: retq
;
@@ -248,23 +246,21 @@ define <4 x i32> @combine_vec_lshr_trunc
define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr1:
; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $33, %xmm2
-; SSE-NEXT: psrlq $32, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $35, %xmm2
; SSE-NEXT: psrlq $34, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $19, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrlq $33, %xmm2
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: movaps %xmm0, %xmm1
+; SSE-NEXT: psrld $19, %xmm1
+; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: psrld $17, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: psrld $18, %xmm1
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -305,23 +301,21 @@ define <4 x i32> @combine_vec_lshr_trunc
define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $49, %xmm2
-; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $51, %xmm2
; SSE-NEXT: psrlq $50, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $27, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrlq $49, %xmm2
+; SSE-NEXT: psrlq $48, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: movaps %xmm0, %xmm1
+; SSE-NEXT: psrld $27, %xmm1
+; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: psrld $25, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: psrld $26, %xmm1
; SSE-NEXT: psrld $24, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -483,15 +477,13 @@ declare <4 x i32> @llvm.ctlz.v4i32(<4 x
define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_lshr_trunc_and:
; SSE: # BB#0:
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE-NEXT: andps {{.*}}(%rip), %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrld %xmm2, %xmm4
Modified: llvm/trunk/test/CodeGen/X86/compress_expand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/compress_expand.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/compress_expand.ll (original)
+++ llvm/trunk/test/CodeGen/X86/compress_expand.ll Thu Dec 15 12:03:38 2016
@@ -252,8 +252,7 @@ define <2 x float> @test13(float* %base,
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
-; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; KNL-NEXT: vmovq {{.*#+}} xmm1 = xmm1[0],zero
+; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL-NEXT: vpxord %zmm2, %zmm2, %zmm2
; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm2, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
@@ -283,8 +282,7 @@ define void @test14(float* %base, <2 x f
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
-; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; KNL-NEXT: vmovq {{.*#+}} xmm1 = xmm1[0],zero
+; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL-NEXT: vpxord %zmm2, %zmm2, %zmm2
; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm2, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
Modified: llvm/trunk/test/CodeGen/X86/i64-to-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i64-to-float.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i64-to-float.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i64-to-float.ll Thu Dec 15 12:03:38 2016
@@ -71,11 +71,9 @@ define <2 x double> @mask_uitofp_2i64_2f
define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_sitofp_4i64_4f32:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
@@ -83,20 +81,16 @@ define <4 x float> @mask_sitofp_4i64_4f3
; X32-AVX: # BB#0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X32-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; X32-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT: vzeroupper
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_sitofp_4i64_4f32:
; X64-SSE: # BB#0:
-; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm1
+; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
@@ -104,9 +98,7 @@ define <4 x float> @mask_sitofp_4i64_4f3
; X64-AVX: # BB#0:
; X64-AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; X64-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
@@ -118,11 +110,9 @@ define <4 x float> @mask_sitofp_4i64_4f3
define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_uitofp_4i64_4f32:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
@@ -130,20 +120,16 @@ define <4 x float> @mask_uitofp_4i64_4f3
; X32-AVX: # BB#0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X32-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; X32-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT: vzeroupper
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_uitofp_4i64_4f32:
; X64-SSE: # BB#0:
-; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm1
+; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
@@ -151,9 +137,7 @@ define <4 x float> @mask_uitofp_4i64_4f3
; X64-AVX: # BB#0:
; X64-AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; X64-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll Thu Dec 15 12:03:38 2016
@@ -1041,8 +1041,7 @@ define void @test20(<2 x float>%a1, <2 x
; KNL_64: # BB#0:
; KNL_64-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; KNL_64-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; KNL_64-NEXT: vmovq {{.*#+}} xmm2 = xmm2[0],zero
+; KNL_64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
; KNL_64-NEXT: vpxor %ymm3, %ymm3, %ymm3
; KNL_64-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; KNL_64-NEXT: vpslld $31, %ymm2, %ymm2
@@ -1053,8 +1052,7 @@ define void @test20(<2 x float>%a1, <2 x
; KNL_32-LABEL: test20:
; KNL_32: # BB#0:
; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; KNL_32-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; KNL_32-NEXT: vmovq {{.*#+}} xmm2 = xmm2[0],zero
+; KNL_32-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
; KNL_32-NEXT: vpxor %ymm3, %ymm3, %ymm3
; KNL_32-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; KNL_32-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -1146,8 +1144,7 @@ define <2 x float> @test22(float* %base,
; KNL_64-LABEL: test22:
; KNL_64: # BB#0:
; KNL_64-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
-; KNL_64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; KNL_64-NEXT: vmovq {{.*#+}} xmm1 = xmm1[0],zero
+; KNL_64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_64-NEXT: vpxor %ymm3, %ymm3, %ymm3
; KNL_64-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1161,8 +1158,7 @@ define <2 x float> @test22(float* %base,
; KNL_32-LABEL: test22:
; KNL_32: # BB#0:
; KNL_32-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
-; KNL_32-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; KNL_32-NEXT: vmovq {{.*#+}} xmm1 = xmm1[0],zero
+; KNL_32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_32-NEXT: vpxor %ymm3, %ymm3, %ymm3
; KNL_32-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
Modified: llvm/trunk/test/CodeGen/X86/masked_memop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_memop.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_memop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_memop.ll Thu Dec 15 12:03:38 2016
@@ -440,8 +440,7 @@ define void @test14(<2 x i32> %trigger,
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX1-NEXT: retq
;
@@ -450,8 +449,7 @@ define void @test14(<2 x i32> %trigger,
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX2-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX2-NEXT: retq
;
@@ -460,8 +458,7 @@ define void @test14(<2 x i32> %trigger,
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512F-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX512F-NEXT: retq
;
@@ -485,8 +482,7 @@ define void @test15(<2 x i32> %trigger,
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX1-NEXT: retq
@@ -496,8 +492,7 @@ define void @test15(<2 x i32> %trigger,
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX2-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; AVX2-NEXT: retq
@@ -507,8 +502,7 @@ define void @test15(<2 x i32> %trigger,
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX512F-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; AVX512F-NEXT: retq
@@ -531,8 +525,7 @@ define <2 x float> @test16(<2 x i32> %tr
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2
; AVX1-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; AVX1-NEXT: retq
@@ -542,8 +535,7 @@ define <2 x float> @test16(<2 x i32> %tr
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX2-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2
; AVX2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; AVX2-NEXT: retq
@@ -553,8 +545,7 @@ define <2 x float> @test16(<2 x i32> %tr
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512F-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2
; AVX512F-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; AVX512F-NEXT: retq
@@ -580,8 +571,7 @@ define <2 x i32> @test17(<2 x i32> %trig
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX1-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
@@ -593,8 +583,7 @@ define <2 x i32> @test17(<2 x i32> %trig
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX2-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
@@ -606,8 +595,7 @@ define <2 x i32> @test17(<2 x i32> %trig
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512F-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2
; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX512F-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
@@ -636,8 +624,7 @@ define <2 x float> @test18(<2 x i32> %tr
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -646,8 +633,7 @@ define <2 x float> @test18(<2 x i32> %tr
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX2-NEXT: vmaskmovps (%rdi), %xmm0, %xmm0
; AVX2-NEXT: retq
;
@@ -656,8 +642,7 @@ define <2 x float> @test18(<2 x i32> %tr
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX512F-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512F-NEXT: vmaskmovps (%rdi), %xmm0, %xmm0
; AVX512F-NEXT: retq
;
Modified: llvm/trunk/test/CodeGen/X86/oddshuffles.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/oddshuffles.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/oddshuffles.ll (original)
+++ llvm/trunk/test/CodeGen/X86/oddshuffles.ll Thu Dec 15 12:03:38 2016
@@ -1102,28 +1102,28 @@ define void @interleave_24i32_out(<24 x
; SSE2-LABEL: interleave_24i32_out:
; SSE2: # BB#0:
; SSE2-NEXT: movdqu 80(%rdi), %xmm8
-; SSE2-NEXT: movdqu 64(%rdi), %xmm10
-; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: movdqu 16(%rdi), %xmm7
+; SSE2-NEXT: movups 64(%rdi), %xmm10
+; SSE2-NEXT: movups (%rdi), %xmm0
+; SSE2-NEXT: movups 16(%rdi), %xmm7
; SSE2-NEXT: movdqu 32(%rdi), %xmm9
-; SSE2-NEXT: movdqu 48(%rdi), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,0,3]
-; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm7[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,1,0,1]
-; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,0],xmm3[2,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,1,0,3]
-; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm10[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,1,0,1]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm6[2,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm1[2,0]
+; SSE2-NEXT: movups 48(%rdi), %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm3
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm10[2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm8[0,1,0,1]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,0],xmm3[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,0]
+; SSE2-NEXT: movaps %xmm0, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm7[2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,1,0,1]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm5[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm1[2,0]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm7[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm7[3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm9[0,1,2,2]
-; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,0],xmm0[2,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,1,2,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,0],xmm0[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[2,3,0,1]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm10[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm10[3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,1,2,2]
@@ -1134,11 +1134,11 @@ define void @interleave_24i32_out(<24 x
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,1,0,3]
; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm10[1,1,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,1,0,3]
-; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm5[0],xmm1[1]
-; SSE2-NEXT: movups %xmm6, 16(%rsi)
-; SSE2-NEXT: movups %xmm3, (%rsi)
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm6[0],xmm1[1]
+; SSE2-NEXT: movups %xmm3, 16(%rsi)
+; SSE2-NEXT: movups %xmm5, (%rsi)
; SSE2-NEXT: movups %xmm2, 16(%rdx)
; SSE2-NEXT: movups %xmm0, (%rdx)
; SSE2-NEXT: movupd %xmm1, 16(%rcx)
@@ -1147,40 +1147,40 @@ define void @interleave_24i32_out(<24 x
;
; SSE42-LABEL: interleave_24i32_out:
; SSE42: # BB#0:
-; SSE42-NEXT: movdqu 80(%rdi), %xmm8
-; SSE42-NEXT: movdqu 64(%rdi), %xmm1
-; SSE42-NEXT: movdqu (%rdi), %xmm5
-; SSE42-NEXT: movdqu 16(%rdi), %xmm6
-; SSE42-NEXT: movdqu 32(%rdi), %xmm2
-; SSE42-NEXT: movdqu 48(%rdi), %xmm4
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,3,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,1,0,1]
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm7[0,1,2,3,4,5],xmm3[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,3,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm1[4,5,6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm8[0,1,0,1]
-; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm7[0,1,2,3,4,5],xmm0[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm2[0,1,2,2]
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,3,0,1]
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm5[2,3],xmm6[4,5,6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,0,3,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5],xmm9[6,7]
+; SSE42-NEXT: movdqu 80(%rdi), %xmm9
+; SSE42-NEXT: movdqu 64(%rdi), %xmm10
+; SSE42-NEXT: movdqu (%rdi), %xmm4
+; SSE42-NEXT: movdqu 16(%rdi), %xmm2
+; SSE42-NEXT: movdqu 32(%rdi), %xmm11
+; SSE42-NEXT: movdqu 48(%rdi), %xmm5
+; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm11[0,1,0,1]
+; SSE42-NEXT: movdqa %xmm2, %xmm7
+; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm4[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
-; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm1[2,3],xmm6[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5,6,7]
+; SSE42-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm2[2,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5],xmm8[6,7]
+; SSE42-NEXT: movdqa %xmm10, %xmm1
+; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3],xmm1[4,5,6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,0,1]
+; SSE42-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm10[2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm9[0,1,0,1]
+; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm5[0,1,2,3,4,5],xmm3[6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm11[0,1,2,2]
+; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,0,3,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5],xmm5[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,3]
-; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,1,2,2]
-; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,5],xmm4[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,1,0,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4,5,6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm8[0,1,0,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4,5,6,7]
-; SSE42-NEXT: movdqu %xmm0, 16(%rsi)
-; SSE42-NEXT: movdqu %xmm3, (%rsi)
-; SSE42-NEXT: movdqu %xmm4, 16(%rdx)
-; SSE42-NEXT: movdqu %xmm5, (%rdx)
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[0,1,2,2]
+; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm1[0,1,2,3,4,5],xmm5[6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm2[2,3],xmm6[4,5,6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,1,0,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm6[0,1,2,3],xmm1[4,5,6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm10[2,3],xmm0[4,5,6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,1,0,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE42-NEXT: movdqu %xmm3, 16(%rsi)
+; SSE42-NEXT: movdqu %xmm4, (%rsi)
+; SSE42-NEXT: movdqu %xmm5, 16(%rdx)
+; SSE42-NEXT: movdqu %xmm7, (%rdx)
; SSE42-NEXT: movdqu %xmm2, 16(%rcx)
; SSE42-NEXT: movdqu %xmm1, (%rcx)
; SSE42-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/palignr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/palignr.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/palignr.ll (original)
+++ llvm/trunk/test/CodeGen/X86/palignr.ll Thu Dec 15 12:03:38 2016
@@ -41,9 +41,7 @@ define <4 x i32> @test3(<4 x i32> %A, <4
;
; CHECK-YONAH-LABEL: test3:
; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; CHECK-YONAH-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
-; CHECK-YONAH-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
; CHECK-YONAH-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 undef, i32 4 >
ret <4 x i32> %C
Modified: llvm/trunk/test/CodeGen/X86/pmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmul.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pmul.ll Thu Dec 15 12:03:38 2016
@@ -1153,32 +1153,30 @@ entry:
define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE2-LABEL: mul_v4i64_zero_upper:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pmuludq %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE41-NEXT: pmuludq %xmm0, %xmm1
-; SSE41-NEXT: pmuludq %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,3,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pmuludq %xmm4, %xmm2
+; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
+; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v4i64_zero_upper:
@@ -1187,9 +1185,7 @@ define <4 x i32> @mul_v4i64_zero_upper(<
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1199,9 +1195,7 @@ define <4 x i32> @mul_v4i64_zero_upper(<
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX512-NEXT: retq
entry:
%val1a = zext <4 x i32> %val1 to <4 x i64>
@@ -1217,23 +1211,21 @@ define <4 x i32> @mul_v4i64_zero_upper_l
; SSE2: # BB#0: # %entry
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pmuludq %xmm1, %xmm3
-; SSE2-NEXT: psrlq $32, %xmm1
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: psllq $32, %xmm1
-; SSE2-NEXT: paddq %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: pmuludq %xmm2, %xmm0
+; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: psrlq $32, %xmm2
-; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: pmuludq %xmm0, %xmm2
; SSE2-NEXT: psllq $32, %xmm2
-; SSE2-NEXT: paddq %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: paddq %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: psrlq $32, %xmm1
+; SSE2-NEXT: pmuludq %xmm4, %xmm1
+; SSE2-NEXT: psllq $32, %xmm1
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper_left:
@@ -1241,21 +1233,19 @@ define <4 x i32> @mul_v4i64_zero_upper_l
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE41-NEXT: movdqa %xmm4, %xmm3
-; SSE41-NEXT: pmuludq %xmm1, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm1
-; SSE41-NEXT: pmuludq %xmm4, %xmm1
-; SSE41-NEXT: psllq $32, %xmm1
-; SSE41-NEXT: paddq %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: pmuludq %xmm0, %xmm2
; SSE41-NEXT: psllq $32, %xmm2
; SSE41-NEXT: paddq %xmm3, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: pmuludq %xmm1, %xmm0
+; SSE41-NEXT: psrlq $32, %xmm1
+; SSE41-NEXT: pmuludq %xmm4, %xmm1
+; SSE41-NEXT: psllq $32, %xmm1
+; SSE41-NEXT: paddq %xmm1, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v4i64_zero_upper_left:
@@ -1267,9 +1257,7 @@ define <4 x i32> @mul_v4i64_zero_upper_l
; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1282,9 +1270,7 @@ define <4 x i32> @mul_v4i64_zero_upper_l
; AVX512-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX512-NEXT: retq
entry:
%val1a = zext <4 x i32> %val1 to <4 x i64>
@@ -1297,35 +1283,33 @@ entry:
define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-LABEL: mul_v4i64_zero_lower:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: psrlq $32, %xmm1
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: psllq $32, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE2-NEXT: psrlq $32, %xmm2
-; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: pmuludq %xmm0, %xmm2
; SSE2-NEXT: psllq $32, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: psrlq $32, %xmm1
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: psllq $32, %xmm3
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm2[1,3]
+; SSE2-NEXT: movaps %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_lower:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE41-NEXT: psrlq $32, %xmm1
-; SSE41-NEXT: pmuludq %xmm4, %xmm1
-; SSE41-NEXT: psllq $32, %xmm1
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: pmuludq %xmm0, %xmm2
; SSE41-NEXT: psllq $32, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: psrlq $32, %xmm1
+; SSE41-NEXT: pmuludq %xmm1, %xmm3
+; SSE41-NEXT: psllq $32, %xmm3
+; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm2[1,3]
+; SSE41-NEXT: movaps %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v4i64_zero_lower:
@@ -1335,9 +1319,7 @@ define <4 x i32> @mul_v4i64_zero_lower(<
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1348,9 +1330,7 @@ define <4 x i32> @mul_v4i64_zero_lower(<
; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX512-NEXT: retq
entry:
%val1a = zext <4 x i32> %val1 to <4 x i64>
@@ -1364,52 +1344,48 @@ entry:
define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE2-LABEL: mul_v8i64_zero_upper:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm8
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE2-NEXT: movdqa %xmm2, %xmm7
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
-; SSE2-NEXT: pmuludq %xmm6, %xmm5
+; SSE2-NEXT: pmuludq %xmm7, %xmm5
; SSE2-NEXT: pmuludq %xmm0, %xmm2
-; SSE2-NEXT: pmuludq %xmm8, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: pmuludq %xmm8, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm3[1,3]
+; SSE2-NEXT: movaps %xmm4, %xmm0
+; SSE2-NEXT: movaps %xmm5, %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i64_zero_upper:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pxor %xmm6, %xmm6
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm8 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm3[0],zero,xmm3[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
; SSE41-NEXT: pmuludq %xmm1, %xmm3
; SSE41-NEXT: pmuludq %xmm0, %xmm2
-; SSE41-NEXT: pmuludq %xmm6, %xmm5
-; SSE41-NEXT: pmuludq %xmm8, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,3,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,3,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: pmuludq %xmm7, %xmm5
+; SSE41-NEXT: pmuludq %xmm8, %xmm4
+; SSE41-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3]
+; SSE41-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm3[1,3]
+; SSE41-NEXT: movaps %xmm4, %xmm0
+; SSE41-NEXT: movaps %xmm5, %xmm1
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v8i64_zero_upper:
Modified: llvm/trunk/test/CodeGen/X86/reduce-trunc-shl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/reduce-trunc-shl.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/reduce-trunc-shl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/reduce-trunc-shl.ll Thu Dec 15 12:03:38 2016
@@ -5,11 +5,10 @@
define void @trunc_shl_7_v4i32_v4i64(<4 x i32> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
; SSE2-LABEL: trunc_shl_7_v4i32_v4i64:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-NEXT: pslld $7, %xmm1
-; SSE2-NEXT: movdqa %xmm1, (%rdi)
+; SSE2-NEXT: movaps (%rsi), %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
+; SSE2-NEXT: pslld $7, %xmm0
+; SSE2-NEXT: movdqa %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; AVX2-LABEL: trunc_shl_7_v4i32_v4i64:
Modified: llvm/trunk/test/CodeGen/X86/sse-fsignum.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-fsignum.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-fsignum.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-fsignum.ll Thu Dec 15 12:03:38 2016
@@ -236,13 +236,11 @@ define void @signum64c(<4 x double>*) {
; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
; AVX1-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: vmovaps %ymm0, (%rdi)
; AVX1-NEXT: vzeroupper
@@ -256,9 +254,7 @@ define void @signum64c(<4 x double>*) {
; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: vmovaps %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
@@ -272,9 +268,7 @@ define void @signum64c(<4 x double>*) {
; AVX512F-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512F-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512F-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512F-NEXT: vmovaps %ymm0, (%rdi)
; AVX512F-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll Thu Dec 15 12:03:38 2016
@@ -347,28 +347,29 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2
define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_4i32:
; SSE: # BB#0:
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT: movapd %xmm0, %xmm2
-; SSE-NEXT: subsd %xmm1, %xmm2
-; SSE-NEXT: cvttsd2si %xmm2, %rax
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movapd %xmm0, %xmm1
+; SSE-NEXT: subsd %xmm2, %xmm1
+; SSE-NEXT: cvttsd2si %xmm1, %rax
; SSE-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; SSE-NEXT: xorq %rcx, %rax
; SSE-NEXT: cvttsd2si %xmm0, %rdx
-; SSE-NEXT: ucomisd %xmm1, %xmm0
+; SSE-NEXT: ucomisd %xmm2, %xmm0
; SSE-NEXT: cmovaeq %rax, %rdx
-; SSE-NEXT: movd %rdx, %xmm2
+; SSE-NEXT: movd %rdx, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: subsd %xmm1, %xmm3
+; SSE-NEXT: subsd %xmm2, %xmm3
; SSE-NEXT: cvttsd2si %xmm3, %rax
; SSE-NEXT: xorq %rcx, %rax
; SSE-NEXT: cvttsd2si %xmm0, %rcx
-; SSE-NEXT: ucomisd %xmm1, %xmm0
+; SSE-NEXT: ucomisd %xmm2, %xmm0
; SSE-NEXT: cmovaeq %rax, %rcx
; SSE-NEXT: movd %rcx, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,2]
-; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT: pxor %xmm0, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f64_to_4i32:
@@ -391,8 +392,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2
; VEX-NEXT: cmovaeq %rax, %rcx
; VEX-NEXT: vmovq %rcx, %xmm0
; VEX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; VEX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; VEX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f64_to_4i32:
@@ -503,35 +503,34 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2
define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_2i32:
; SSE: # BB#0:
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT: movapd %xmm0, %xmm2
-; SSE-NEXT: subsd %xmm1, %xmm2
-; SSE-NEXT: cvttsd2si %xmm2, %rax
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movapd %xmm0, %xmm1
+; SSE-NEXT: subsd %xmm2, %xmm1
+; SSE-NEXT: cvttsd2si %xmm1, %rax
; SSE-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; SSE-NEXT: xorq %rcx, %rax
; SSE-NEXT: cvttsd2si %xmm0, %rdx
-; SSE-NEXT: ucomisd %xmm1, %xmm0
+; SSE-NEXT: ucomisd %xmm2, %xmm0
; SSE-NEXT: cmovaeq %rax, %rdx
-; SSE-NEXT: movd %rdx, %xmm2
+; SSE-NEXT: movd %rdx, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: subsd %xmm1, %xmm3
+; SSE-NEXT: subsd %xmm2, %xmm3
; SSE-NEXT: cvttsd2si %xmm3, %rax
; SSE-NEXT: xorq %rcx, %rax
; SSE-NEXT: cvttsd2si %xmm0, %rdx
-; SSE-NEXT: ucomisd %xmm1, %xmm0
+; SSE-NEXT: ucomisd %xmm2, %xmm0
; SSE-NEXT: cmovaeq %rax, %rdx
; SSE-NEXT: movd %rdx, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: xorq %rax, %rcx
-; SSE-NEXT: ucomisd %xmm1, %xmm0
+; SSE-NEXT: ucomisd %xmm2, %xmm0
; SSE-NEXT: cmovbq %rax, %rcx
-; SSE-NEXT: movd %rcx, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: movd %rcx, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f64_to_2i32:
@@ -774,15 +773,14 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4
; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movd %rdx, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; SSE-NEXT: movapd %xmm0, %xmm3
-; SSE-NEXT: subsd %xmm2, %xmm3
-; SSE-NEXT: cvttsd2si %xmm3, %rcx
+; SSE-NEXT: movapd %xmm0, %xmm1
+; SSE-NEXT: subsd %xmm2, %xmm1
+; SSE-NEXT: cvttsd2si %xmm1, %rcx
; SSE-NEXT: xorq %rax, %rcx
; SSE-NEXT: cvttsd2si %xmm0, %rdx
; SSE-NEXT: ucomisd %xmm2, %xmm0
; SSE-NEXT: cmovaeq %rcx, %rdx
-; SSE-NEXT: movd %rdx, %xmm3
+; SSE-NEXT: movd %rdx, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: movaps %xmm0, %xmm4
; SSE-NEXT: subsd %xmm2, %xmm4
@@ -792,9 +790,9 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4
; SSE-NEXT: ucomisd %xmm2, %xmm0
; SSE-NEXT: cmovaeq %rcx, %rax
; SSE-NEXT: movd %rax, %xmm0
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f64_to_4i32:
@@ -2248,12 +2246,12 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2
; SSE-NEXT: movzwl %ax, %edi
; SSE-NEXT: callq __gnu_h2f_ieee
; SSE-NEXT: cvttss2si %xmm0, %rax
-; SSE-NEXT: movd %rax, %xmm0
-; SSE-NEXT: cvttss2si (%rsp), %rax # 4-byte Folded Reload
; SSE-NEXT: movd %rax, %xmm1
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,2]
-; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: cvttss2si (%rsp), %rax # 4-byte Folded Reload
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; SSE-NEXT: popq %rax
; SSE-NEXT: retq
;
@@ -2275,8 +2273,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2
; VEX-NEXT: vcvttss2si (%rsp), %rax # 4-byte Folded Reload
; VEX-NEXT: vmovq %rax, %xmm1
; VEX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; VEX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; VEX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; VEX-NEXT: popq %rax
; VEX-NEXT: retq
;
@@ -2293,8 +2290,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2
; AVX512F-NEXT: vcvttss2si %xmm0, %rax
; AVX512F-NEXT: vmovq %rax, %xmm0
; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_2f16_to_4i32:
@@ -2308,8 +2304,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2
; AVX512VL-NEXT: vcvttss2si %xmm0, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm0
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_2f16_to_4i32:
@@ -2325,8 +2320,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2
; AVX512DQ-NEXT: vcvttss2si %xmm0, %rax
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512DQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_2f16_to_4i32:
@@ -2340,8 +2334,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2
; AVX512VLDQ-NEXT: vcvttss2si %xmm0, %rax
; AVX512VLDQ-NEXT: vmovq %rax, %xmm0
; AVX512VLDQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512VLDQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VLDQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512VLDQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <2 x half> %a to <2 x i32>
%ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2367,11 +2360,11 @@ define <4 x i32> @fptosi_2f80_to_4i32(<2
; SSE-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; SSE-NEXT: fistpll -{{[0-9]+}}(%rsp)
; SSE-NEXT: fldcw -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,2]
-; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f80_to_4i32:
@@ -2383,8 +2376,7 @@ define <4 x i32> @fptosi_2f80_to_4i32(<2
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX-NEXT: retq
%cvt = fptosi <2 x x86_fp80> %a to <2 x i32>
%ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2410,8 +2402,8 @@ define <4 x i32> @fptosi_2f128_to_4i32(<
; SSE-NEXT: movd %rax, %xmm0
; SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
@@ -2435,8 +2427,7 @@ define <4 x i32> @fptosi_2f128_to_4i32(<
; VEX-NEXT: vmovq %rax, %xmm0
; VEX-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; VEX-NEXT: # xmm0 = xmm0[0],mem[0]
-; VEX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; VEX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; VEX-NEXT: addq $24, %rsp
; VEX-NEXT: popq %rbx
; VEX-NEXT: popq %r14
@@ -2460,8 +2451,7 @@ define <4 x i32> @fptosi_2f128_to_4i32(<
; AVX512F-NEXT: vmovq %rax, %xmm0
; AVX512F-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512F-NEXT: # xmm0 = xmm0[0],mem[0]
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512F-NEXT: addq $24, %rsp
; AVX512F-NEXT: popq %rbx
; AVX512F-NEXT: popq %r14
@@ -2485,8 +2475,7 @@ define <4 x i32> @fptosi_2f128_to_4i32(<
; AVX512VL-NEXT: vmovq %rax, %xmm0
; AVX512VL-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512VL-NEXT: addq $24, %rsp
; AVX512VL-NEXT: popq %rbx
; AVX512VL-NEXT: popq %r14
@@ -2510,8 +2499,7 @@ define <4 x i32> @fptosi_2f128_to_4i32(<
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512DQ-NEXT: # xmm0 = xmm0[0],mem[0]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512DQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512DQ-NEXT: addq $24, %rsp
; AVX512DQ-NEXT: popq %rbx
; AVX512DQ-NEXT: popq %r14
@@ -2535,8 +2523,7 @@ define <4 x i32> @fptosi_2f128_to_4i32(<
; AVX512VLDQ-NEXT: vmovq %rax, %xmm0
; AVX512VLDQ-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512VLDQ-NEXT: # xmm0 = xmm0[0],mem[0]
-; AVX512VLDQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VLDQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512VLDQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX512VLDQ-NEXT: addq $24, %rsp
; AVX512VLDQ-NEXT: popq %rbx
; AVX512VLDQ-NEXT: popq %r14
Modified: llvm/trunk/test/CodeGen/X86/vector-compare-results.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-compare-results.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-compare-results.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-compare-results.ll Thu Dec 15 12:03:38 2016
@@ -6299,16 +6299,12 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; SSE2-NEXT: cmpltpd %xmm7, %xmm8
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: cmpltpd %xmm6, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm8[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm6[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm8[0,2]
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
; SSE2-NEXT: cmpltpd %xmm5, %xmm6
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: cmpltpd %xmm4, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
; SSE2-NEXT: pslld $31, %xmm7
; SSE2-NEXT: psrad $31, %xmm7
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm7[0,2,2,3,4,5,6,7]
@@ -6322,20 +6318,16 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: cmpltpd %xmm3, %xmm5
-; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3
-; SSE2-NEXT: cmpltpd %xmm2, %xmm3
+; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
+; SSE2-NEXT: cmpltpd %xmm2, %xmm6
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm5[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm5[0,2]
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: cmpltpd %xmm1, %xmm5
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1
; SSE2-NEXT: cmpltpd %xmm0, %xmm1
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: psllw $15, %xmm4
; SSE2-NEXT: psraw $15, %xmm4
@@ -6358,16 +6350,12 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm4
; SSE2-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm4[0,2]
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm4
; SSE2-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm4
; SSE2-NEXT: pslld $31, %xmm5
; SSE2-NEXT: psrad $31, %xmm5
@@ -6382,16 +6370,12 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE2-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE2-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[0,2]
; SSE2-NEXT: pslld $31, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
@@ -6523,18 +6507,14 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; SSE42-NEXT: cmpltpd %xmm7, %xmm8
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm7
; SSE42-NEXT: cmpltpd %xmm6, %xmm7
-; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm8[0,1,0,2]
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
+; SSE42-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm8[0,2]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
; SSE42-NEXT: cmpltpd %xmm5, %xmm6
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: cmpltpd %xmm4, %xmm5
; SSE42-NEXT: pslld $31, %xmm7
; SSE42-NEXT: psrad $31, %xmm7
-; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm6[0,1,0,2]
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; SSE42-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE42-NEXT: pshufb %xmm4, %xmm7
; SSE42-NEXT: pslld $31, %xmm5
@@ -6546,99 +6526,87 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3
; SSE42-NEXT: cmpltpd %xmm2, %xmm3
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm2
-; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
-; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4,5,6,7]
+; SSE42-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,2]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
; SSE42-NEXT: cmpltpd %xmm1, %xmm6
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm7
-; SSE42-NEXT: cmpltpd %xmm0, %xmm7
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5,6,7]
+; SSE42-NEXT: cmpltpd %xmm0, %xmm1
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0
+; SSE42-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
; SSE42-NEXT: psllw $15, %xmm5
; SSE42-NEXT: psraw $15, %xmm5
; SSE42-NEXT: pslld $31, %xmm3
; SSE42-NEXT: psrad $31, %xmm3
; SSE42-NEXT: pshufb %xmm4, %xmm3
-; SSE42-NEXT: pslld $31, %xmm0
-; SSE42-NEXT: psrad $31, %xmm0
-; SSE42-NEXT: pshufb %xmm4, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSE42-NEXT: pslld $31, %xmm1
+; SSE42-NEXT: psrad $31, %xmm1
+; SSE42-NEXT: pshufb %xmm4, %xmm1
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE42-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE42-NEXT: pshufb %xmm3, %xmm5
-; SSE42-NEXT: psllw $15, %xmm0
-; SSE42-NEXT: psraw $15, %xmm0
-; SSE42-NEXT: pshufb %xmm3, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; SSE42-NEXT: psllw $15, %xmm1
+; SSE42-NEXT: psraw $15, %xmm1
+; SSE42-NEXT: pshufb %xmm3, %xmm1
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,2]
; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm5[4,5,6,7]
+; SSE42-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm5[0,2]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,1,0,2]
-; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1
+; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm0
+; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: pslld $31, %xmm6
; SSE42-NEXT: psrad $31, %xmm6
; SSE42-NEXT: pshufb %xmm4, %xmm6
-; SSE42-NEXT: pslld $31, %xmm5
-; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pshufb %xmm4, %xmm5
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
+; SSE42-NEXT: pslld $31, %xmm0
+; SSE42-NEXT: psrad $31, %xmm0
+; SSE42-NEXT: pshufb %xmm4, %xmm0
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
+; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm6
+; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm5
+; SSE42-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
-; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,1,0,2]
; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm2
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4,5,6,7]
-; SSE42-NEXT: pslld $31, %xmm7
-; SSE42-NEXT: psrad $31, %xmm7
-; SSE42-NEXT: pshufb %xmm4, %xmm7
-; SSE42-NEXT: pslld $31, %xmm1
-; SSE42-NEXT: psrad $31, %xmm1
-; SSE42-NEXT: pshufb %xmm4, %xmm1
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm7[0]
-; SSE42-NEXT: psllw $15, %xmm5
-; SSE42-NEXT: psraw $15, %xmm5
-; SSE42-NEXT: pshufb %xmm3, %xmm5
-; SSE42-NEXT: psllw $15, %xmm1
-; SSE42-NEXT: psraw $15, %xmm1
-; SSE42-NEXT: pshufb %xmm3, %xmm1
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; SSE42-NEXT: pextrb $15, %xmm1, %eax
+; SSE42-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE42-NEXT: pslld $31, %xmm5
+; SSE42-NEXT: psrad $31, %xmm5
+; SSE42-NEXT: pshufb %xmm4, %xmm5
+; SSE42-NEXT: pslld $31, %xmm2
+; SSE42-NEXT: psrad $31, %xmm2
+; SSE42-NEXT: pshufb %xmm4, %xmm2
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; SSE42-NEXT: psllw $15, %xmm0
+; SSE42-NEXT: psraw $15, %xmm0
+; SSE42-NEXT: pshufb %xmm3, %xmm0
+; SSE42-NEXT: psllw $15, %xmm2
+; SSE42-NEXT: psraw $15, %xmm2
+; SSE42-NEXT: pshufb %xmm3, %xmm2
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSE42-NEXT: pextrb $15, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm1, %eax
+; SSE42-NEXT: pextrb $14, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm1, %r8d
-; SSE42-NEXT: pextrb $12, %xmm1, %r9d
-; SSE42-NEXT: pextrb $11, %xmm1, %r10d
-; SSE42-NEXT: pextrb $10, %xmm1, %r11d
-; SSE42-NEXT: pextrb $9, %xmm1, %r14d
-; SSE42-NEXT: pextrb $8, %xmm1, %r15d
-; SSE42-NEXT: pextrb $7, %xmm1, %r12d
-; SSE42-NEXT: pextrb $6, %xmm1, %r13d
-; SSE42-NEXT: pextrb $5, %xmm1, %ebx
-; SSE42-NEXT: pextrb $4, %xmm1, %ebp
-; SSE42-NEXT: pextrb $3, %xmm1, %eax
-; SSE42-NEXT: pextrb $2, %xmm1, %ecx
-; SSE42-NEXT: pextrb $1, %xmm1, %edx
-; SSE42-NEXT: pextrb $0, %xmm1, %esi
+; SSE42-NEXT: pextrb $13, %xmm2, %r8d
+; SSE42-NEXT: pextrb $12, %xmm2, %r9d
+; SSE42-NEXT: pextrb $11, %xmm2, %r10d
+; SSE42-NEXT: pextrb $10, %xmm2, %r11d
+; SSE42-NEXT: pextrb $9, %xmm2, %r14d
+; SSE42-NEXT: pextrb $8, %xmm2, %r15d
+; SSE42-NEXT: pextrb $7, %xmm2, %r12d
+; SSE42-NEXT: pextrb $6, %xmm2, %r13d
+; SSE42-NEXT: pextrb $5, %xmm2, %ebx
+; SSE42-NEXT: pextrb $4, %xmm2, %ebp
+; SSE42-NEXT: pextrb $3, %xmm2, %eax
+; SSE42-NEXT: pextrb $2, %xmm2, %ecx
+; SSE42-NEXT: pextrb $1, %xmm2, %edx
+; SSE42-NEXT: pextrb $0, %xmm2, %esi
; SSE42-NEXT: andb $1, %r8b
; SSE42-NEXT: movb %r8b, 2(%rdi)
; SSE42-NEXT: andb $1, %r9b
@@ -6667,26 +6635,26 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; SSE42-NEXT: movb %dl, 2(%rdi)
; SSE42-NEXT: andb $1, %sil
; SSE42-NEXT: movb %sil, 2(%rdi)
-; SSE42-NEXT: pextrb $15, %xmm0, %eax
+; SSE42-NEXT: pextrb $15, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $14, %xmm0, %eax
+; SSE42-NEXT: pextrb $14, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $13, %xmm0, %r8d
-; SSE42-NEXT: pextrb $12, %xmm0, %r9d
-; SSE42-NEXT: pextrb $11, %xmm0, %r10d
-; SSE42-NEXT: pextrb $10, %xmm0, %r11d
-; SSE42-NEXT: pextrb $9, %xmm0, %r14d
-; SSE42-NEXT: pextrb $8, %xmm0, %r15d
-; SSE42-NEXT: pextrb $7, %xmm0, %r12d
-; SSE42-NEXT: pextrb $6, %xmm0, %r13d
-; SSE42-NEXT: pextrb $5, %xmm0, %ebx
-; SSE42-NEXT: pextrb $4, %xmm0, %ebp
-; SSE42-NEXT: pextrb $3, %xmm0, %eax
-; SSE42-NEXT: pextrb $2, %xmm0, %ecx
-; SSE42-NEXT: pextrb $1, %xmm0, %edx
-; SSE42-NEXT: pextrb $0, %xmm0, %esi
+; SSE42-NEXT: pextrb $13, %xmm1, %r8d
+; SSE42-NEXT: pextrb $12, %xmm1, %r9d
+; SSE42-NEXT: pextrb $11, %xmm1, %r10d
+; SSE42-NEXT: pextrb $10, %xmm1, %r11d
+; SSE42-NEXT: pextrb $9, %xmm1, %r14d
+; SSE42-NEXT: pextrb $8, %xmm1, %r15d
+; SSE42-NEXT: pextrb $7, %xmm1, %r12d
+; SSE42-NEXT: pextrb $6, %xmm1, %r13d
+; SSE42-NEXT: pextrb $5, %xmm1, %ebx
+; SSE42-NEXT: pextrb $4, %xmm1, %ebp
+; SSE42-NEXT: pextrb $3, %xmm1, %eax
+; SSE42-NEXT: pextrb $2, %xmm1, %ecx
+; SSE42-NEXT: pextrb $1, %xmm1, %edx
+; SSE42-NEXT: pextrb $0, %xmm1, %esi
; SSE42-NEXT: andb $1, %r8b
; SSE42-NEXT: movb %r8b, (%rdi)
; SSE42-NEXT: andb $1, %r9b
@@ -7053,11 +7021,10 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: pcmpgtd %xmm9, %xmm10
; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm7, %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm9[1,1,3,3]
-; SSE2-NEXT: pand %xmm11, %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm10[1,1,3,3]
-; SSE2-NEXT: por %xmm9, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm7[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm9[1,1,3,3]
+; SSE2-NEXT: pand %xmm11, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm9
; SSE2-NEXT: pxor %xmm8, %xmm6
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm7
@@ -7069,11 +7036,10 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: pand %xmm11, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm10[1,1,3,3]
; SSE2-NEXT: por %xmm6, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm9[0]
-; SSE2-NEXT: pslld $31, %xmm6
-; SSE2-NEXT: psrad $31, %xmm6
-; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm9[0,2]
+; SSE2-NEXT: pslld $31, %xmm7
+; SSE2-NEXT: psrad $31, %xmm7
+; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,2,2,3]
; SSE2-NEXT: pxor %xmm8, %xmm5
@@ -7087,23 +7053,21 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: pand %xmm10, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,2,2,3]
; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6
-; SSE2-NEXT: pxor %xmm8, %xmm6
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5
+; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa %xmm4, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm10[0]
-; SSE2-NEXT: pslld $31, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
+; SSE2-NEXT: pslld $31, %xmm5
+; SSE2-NEXT: psrad $31, %xmm5
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm9[0]
@@ -7122,23 +7086,21 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
+; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm7, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE2-NEXT: pslld $31, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
+; SSE2-NEXT: pslld $31, %xmm3
+; SSE2-NEXT: psrad $31, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pxor %xmm8, %xmm1
@@ -7152,23 +7114,21 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: pslld $31, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE2-NEXT: pslld $31, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -7188,24 +7148,22 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE2-NEXT: pslld $31, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm2[0,2]
+; SSE2-NEXT: pslld $31, %xmm3
+; SSE2-NEXT: psrad $31, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
@@ -7220,24 +7178,22 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
-; SSE2-NEXT: pslld $31, %xmm3
-; SSE2-NEXT: psrad $31, %xmm3
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
+; SSE2-NEXT: pslld $31, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
@@ -7256,24 +7212,22 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2
+; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
-; SSE2-NEXT: pslld $31, %xmm3
-; SSE2-NEXT: psrad $31, %xmm3
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: por %xmm2, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
+; SSE2-NEXT: pslld $31, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
@@ -7288,23 +7242,21 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
+; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: pxor {{[0-9]+}}(%rsp), %xmm8
; SSE2-NEXT: movdqa %xmm8, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm8
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm8[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm4, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
-; SSE2-NEXT: pslld $31, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm4[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: por %xmm3, %xmm5
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm4[0,2]
+; SSE2-NEXT: pslld $31, %xmm5
+; SSE2-NEXT: psrad $31, %xmm5
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
@@ -7415,145 +7367,128 @@ define <32 x i1> @test_cmp_v32i64(<32 x
;
; SSE42-LABEL: test_cmp_v32i64:
; SSE42: # BB#0:
-; SSE42-NEXT: movdqa %xmm0, %xmm8
-; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
-; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12
-; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm14
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm15
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm7
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,2]
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
; SSE42-NEXT: pslld $31, %xmm6
; SSE42-NEXT: psrad $31, %xmm6
; SSE42-NEXT: movdqa {{.*#+}} xmm7 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE42-NEXT: pshufb %xmm7, %xmm6
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,1,0,2]
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm4
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm0[4,5,6,7]
-; SSE42-NEXT: pslld $31, %xmm5
-; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pshufb %xmm7, %xmm5
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
-; SSE42-NEXT: psllw $15, %xmm5
-; SSE42-NEXT: psraw $15, %xmm5
-; SSE42-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE42-NEXT: pshufb %xmm4, %xmm5
+; SSE42-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
+; SSE42-NEXT: pslld $31, %xmm4
+; SSE42-NEXT: psrad $31, %xmm4
+; SSE42-NEXT: pshufb %xmm7, %xmm4
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; SSE42-NEXT: psllw $15, %xmm4
+; SSE42-NEXT: psraw $15, %xmm4
+; SSE42-NEXT: movdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE42-NEXT: pshufb %xmm5, %xmm4
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm3
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,2]
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm2
-; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE42-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; SSE42-NEXT: pslld $31, %xmm2
; SSE42-NEXT: psrad $31, %xmm2
; SSE42-NEXT: pshufb %xmm7, %xmm2
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm8[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm0
+; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE42-NEXT: pslld $31, %xmm0
; SSE42-NEXT: psrad $31, %xmm0
; SSE42-NEXT: pshufb %xmm7, %xmm0
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE42-NEXT: psllw $15, %xmm0
; SSE42-NEXT: psraw $15, %xmm0
-; SSE42-NEXT: pshufb %xmm4, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; SSE42-NEXT: pshufb %xmm5, %xmm0
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm15
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm15[0,1,0,2]
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm14
-; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm14[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; SSE42-NEXT: pslld $31, %xmm3
-; SSE42-NEXT: psrad $31, %xmm3
-; SSE42-NEXT: pshufb %xmm7, %xmm3
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm13
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,1,0,2]
+; SSE42-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,2],xmm15[0,2]
+; SSE42-NEXT: pslld $31, %xmm13
+; SSE42-NEXT: psrad $31, %xmm13
+; SSE42-NEXT: pshufb %xmm7, %xmm13
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm14
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
+; SSE42-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm14[0,2]
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pshufb %xmm7, %xmm9
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm13[0]
+; SSE42-NEXT: psllw $15, %xmm9
+; SSE42-NEXT: psraw $15, %xmm9
+; SSE42-NEXT: pshufb %xmm5, %xmm9
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm12
-; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm12[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE42-NEXT: pslld $31, %xmm2
-; SSE42-NEXT: psrad $31, %xmm2
-; SSE42-NEXT: pshufb %xmm7, %xmm2
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE42-NEXT: psllw $15, %xmm2
-; SSE42-NEXT: psraw $15, %xmm2
-; SSE42-NEXT: pshufb %xmm4, %xmm2
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,1,0,2]
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10
-; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm10[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; SSE42-NEXT: pslld $31, %xmm3
-; SSE42-NEXT: psrad $31, %xmm3
-; SSE42-NEXT: pshufb %xmm7, %xmm3
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[0,1,0,2]
-; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4,5,6,7]
-; SSE42-NEXT: pslld $31, %xmm1
-; SSE42-NEXT: psrad $31, %xmm1
-; SSE42-NEXT: pshufb %xmm7, %xmm1
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE42-NEXT: psllw $15, %xmm1
-; SSE42-NEXT: psraw $15, %xmm1
-; SSE42-NEXT: pshufb %xmm4, %xmm1
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE42-NEXT: pextrb $15, %xmm1, %eax
+; SSE42-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm12[0,2]
+; SSE42-NEXT: pslld $31, %xmm10
+; SSE42-NEXT: psrad $31, %xmm10
+; SSE42-NEXT: pshufb %xmm7, %xmm10
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm11[0,2]
+; SSE42-NEXT: pslld $31, %xmm8
+; SSE42-NEXT: psrad $31, %xmm8
+; SSE42-NEXT: pshufb %xmm7, %xmm8
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
+; SSE42-NEXT: psllw $15, %xmm8
+; SSE42-NEXT: psraw $15, %xmm8
+; SSE42-NEXT: pshufb %xmm5, %xmm8
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
+; SSE42-NEXT: pextrb $15, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm1, %eax
+; SSE42-NEXT: pextrb $14, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm1, %eax
+; SSE42-NEXT: pextrb $13, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $12, %xmm1, %eax
+; SSE42-NEXT: pextrb $12, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $11, %xmm1, %eax
+; SSE42-NEXT: pextrb $11, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $10, %xmm1, %eax
+; SSE42-NEXT: pextrb $10, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $9, %xmm1, %eax
+; SSE42-NEXT: pextrb $9, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $8, %xmm1, %eax
+; SSE42-NEXT: pextrb $8, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $7, %xmm1, %eax
+; SSE42-NEXT: pextrb $7, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $6, %xmm1, %eax
+; SSE42-NEXT: pextrb $6, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $5, %xmm1, %eax
+; SSE42-NEXT: pextrb $5, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $4, %xmm1, %eax
+; SSE42-NEXT: pextrb $4, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $3, %xmm1, %eax
+; SSE42-NEXT: pextrb $3, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $2, %xmm1, %eax
+; SSE42-NEXT: pextrb $2, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $1, %xmm1, %eax
+; SSE42-NEXT: pextrb $1, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $0, %xmm1, %eax
+; SSE42-NEXT: pextrb $0, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
; SSE42-NEXT: pextrb $15, %xmm0, %eax
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll Thu Dec 15 12:03:38 2016
@@ -2207,8 +2207,7 @@ define <4 x i32> @insert_mem_lo_v4i32(<2
; AVX512VL-LABEL: insert_mem_lo_v4i32:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[2,3]
; AVX512VL-NEXT: retq
%a = load <2 x i32>, <2 x i32>* %ptr
%v = shufflevector <2 x i32> %a, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -2250,8 +2249,7 @@ define <4 x i32> @insert_mem_hi_v4i32(<2
; AVX512VL-LABEL: insert_mem_hi_v4i32:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; AVX512VL-NEXT: retq
%a = load <2 x i32>, <2 x i32>* %ptr
%v = shufflevector <2 x i32> %a, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll Thu Dec 15 12:03:38 2016
@@ -517,42 +517,17 @@ define <4 x i32> @combine_bitwise_ops_te
}
define <4 x i32> @combine_bitwise_ops_test1c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE2-LABEL: combine_bitwise_ops_test1c:
-; SSE2: # BB#0:
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: combine_bitwise_ops_test1c:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: pand %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: retq
+; SSE-LABEL: combine_bitwise_ops_test1c:
+; SSE: # BB#0:
+; SSE-NEXT: andps %xmm1, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
+; SSE-NEXT: retq
;
-; SSE41-LABEL: combine_bitwise_ops_test1c:
-; SSE41: # BB#0:
-; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: combine_bitwise_ops_test1c:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: combine_bitwise_ops_test1c:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX2-NEXT: retq
+; AVX-LABEL: combine_bitwise_ops_test1c:
+; AVX: # BB#0:
+; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
+; AVX-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%and = and <4 x i32> %shuf1, %shuf2
@@ -560,42 +535,17 @@ define <4 x i32> @combine_bitwise_ops_te
}
define <4 x i32> @combine_bitwise_ops_test2c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE2-LABEL: combine_bitwise_ops_test2c:
-; SSE2: # BB#0:
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: combine_bitwise_ops_test2c:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: por %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: combine_bitwise_ops_test2c:
-; SSE41: # BB#0:
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE41-NEXT: retq
+; SSE-LABEL: combine_bitwise_ops_test2c:
+; SSE: # BB#0:
+; SSE-NEXT: orps %xmm1, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
+; SSE-NEXT: retq
;
-; AVX1-LABEL: combine_bitwise_ops_test2c:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: combine_bitwise_ops_test2c:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX2-NEXT: retq
+; AVX-LABEL: combine_bitwise_ops_test2c:
+; AVX: # BB#0:
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
+; AVX-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%or = or <4 x i32> %shuf1, %shuf2
@@ -605,30 +555,28 @@ define <4 x i32> @combine_bitwise_ops_te
define <4 x i32> @combine_bitwise_ops_test3c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test3c:
; SSE2: # BB#0:
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: xorps %xmm1, %xmm0
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test3c:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pxor %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: xorps %xmm1, %xmm0
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test3c:
; SSE41: # BB#0:
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE41-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE41-NEXT: xorps %xmm1, %xmm0
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test3c:
; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
@@ -637,42 +585,18 @@ define <4 x i32> @combine_bitwise_ops_te
}
define <4 x i32> @combine_bitwise_ops_test4c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE2-LABEL: combine_bitwise_ops_test4c:
-; SSE2: # BB#0:
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: combine_bitwise_ops_test4c:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: pand %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: retq
+; SSE-LABEL: combine_bitwise_ops_test4c:
+; SSE: # BB#0:
+; SSE-NEXT: andps %xmm1, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; SSE41-LABEL: combine_bitwise_ops_test4c:
-; SSE41: # BB#0:
-; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: combine_bitwise_ops_test4c:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: combine_bitwise_ops_test4c:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX2-NEXT: retq
+; AVX-LABEL: combine_bitwise_ops_test4c:
+; AVX: # BB#0:
+; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
+; AVX-NEXT: retq
%shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%and = and <4 x i32> %shuf1, %shuf2
@@ -680,42 +604,18 @@ define <4 x i32> @combine_bitwise_ops_te
}
define <4 x i32> @combine_bitwise_ops_test5c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE2-LABEL: combine_bitwise_ops_test5c:
-; SSE2: # BB#0:
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
+; SSE-LABEL: combine_bitwise_ops_test5c:
+; SSE: # BB#0:
+; SSE-NEXT: orps %xmm1, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; SSSE3-LABEL: combine_bitwise_ops_test5c:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: por %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: combine_bitwise_ops_test5c:
-; SSE41: # BB#0:
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: combine_bitwise_ops_test5c:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: combine_bitwise_ops_test5c:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX2-NEXT: retq
+; AVX-LABEL: combine_bitwise_ops_test5c:
+; AVX: # BB#0:
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
+; AVX-NEXT: retq
%shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%or = or <4 x i32> %shuf1, %shuf2
@@ -725,41 +625,31 @@ define <4 x i32> @combine_bitwise_ops_te
define <4 x i32> @combine_bitwise_ops_test6c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test6c:
; SSE2: # BB#0:
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: xorps %xmm1, %xmm0
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test6c:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pxor %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSSE3-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSSE3-NEXT: xorps %xmm1, %xmm0
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,3]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test6c:
; SSE41: # BB#0:
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: xorps %xmm1, %xmm0
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = zero,zero,xmm0[1,3]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: combine_bitwise_ops_test6c:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: combine_bitwise_ops_test6c:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX2-NEXT: retq
+; AVX-LABEL: combine_bitwise_ops_test6c:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[1,3]
+; AVX-NEXT: retq
%shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%xor = xor <4 x i32> %shuf1, %shuf2
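
For anyone who wants to see the new lowering outside the test suite, a minimal standalone IR file along these lines (hypothetical, not part of this patch) exercises the same shuffle-of-bitwise-ops shape as the combine_bitwise_ops_* tests above, and the llc invocation mirrors the triples/attrs those tests use:

; repro-bitwise-shuffle.ll -- hypothetical reproducer, not part of the patch.
; Try: llc < repro-bitwise-shuffle.ll -mtriple=x86_64-unknown-unknown -mattr=+sse2
define <4 x i32> @or_of_shuffles(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
  ; Both shuffles take elements 0,2 of their first operand and elements 1,3
  ; of %c, so per the test2c diff above the whole thing should collapse to
  ; an orps plus a single shufps.
  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32> <i32 0, i32 2, i32 5, i32 7>
  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32> <i32 0, i32 2, i32 5, i32 7>
  %or = or <4 x i32> %shuf1, %shuf2
  ret <4 x i32> %or
}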
Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll Thu Dec 15 12:03:38 2016
@@ -12,22 +12,18 @@
define <4 x i32> @trunc_add_v4i64_4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_add_v4i64_4i32:
; SSE: # BB#0:
-; SSE-NEXT: paddq %xmm2, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_v4i64_4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -398,11 +394,9 @@ define <4 x i32> @trunc_add_const_v4i64_
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
-; SSE-NEXT: paddq %xmm0, %xmm2
+; SSE-NEXT: paddq %xmm2, %xmm0
; SSE-NEXT: paddq {{.*}}(%rip), %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_const_v4i64_4i32:
@@ -413,9 +407,7 @@ define <4 x i32> @trunc_add_const_v4i64_
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -784,22 +776,18 @@ define <16 x i8> @trunc_add_const_v16i16
define <4 x i32> @trunc_sub_v4i64_4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_sub_v4i64_4i32:
; SSE: # BB#0:
-; SSE-NEXT: psubq %xmm2, %xmm0
; SSE-NEXT: psubq %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: psubq %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_v4i64_4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1172,9 +1160,7 @@ define <4 x i32> @trunc_sub_const_v4i64_
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; SSE-NEXT: psubq %xmm2, %xmm0
; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v4i64_4i32:
@@ -1185,9 +1171,7 @@ define <4 x i32> @trunc_sub_const_v4i64_
; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1557,46 +1541,44 @@ define <16 x i8> @trunc_sub_const_v16i16
define <4 x i32> @trunc_mul_v4i64_4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_mul_v4i64_4i32:
; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pmuludq %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: psrlq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm0, %xmm5
+; SSE-NEXT: pmuludq %xmm1, %xmm5
; SSE-NEXT: psllq $32, %xmm5
-; SSE-NEXT: psrlq $32, %xmm0
-; SSE-NEXT: pmuludq %xmm2, %xmm0
-; SSE-NEXT: psllq $32, %xmm0
-; SSE-NEXT: paddq %xmm5, %xmm0
-; SSE-NEXT: paddq %xmm4, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: pmuludq %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm1, %xmm4
-; SSE-NEXT: psllq $32, %xmm4
; SSE-NEXT: psrlq $32, %xmm1
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm5, %xmm1
; SSE-NEXT: paddq %xmm4, %xmm1
-; SSE-NEXT: paddq %xmm2, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm0, %xmm4
+; SSE-NEXT: psllq $32, %xmm4
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm4, %xmm0
+; SSE-NEXT: paddq %xmm3, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_v4i64_4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm4
-; AVX1-NEXT: vpmuludq %xmm1, %xmm4, %xmm4
-; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
-; AVX1-NEXT: vpaddq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm5
+; AVX1-NEXT: vpmuludq %xmm5, %xmm3, %xmm5
+; AVX1-NEXT: vpsllq $32, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm3
+; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
@@ -1606,9 +1588,7 @@ define <4 x i32> @trunc_mul_v4i64_4i32(<
; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vpaddq %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2324,6 +2304,13 @@ define <16 x i8> @trunc_mul_v16i16_v16i8
define <4 x i32> @trunc_mul_const_v4i64_4i32(<4 x i64> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v4i64_4i32:
; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,3]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm2, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm3, %xmm1
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
@@ -2333,38 +2320,27 @@ define <4 x i32> @trunc_mul_const_v4i64_
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: psllq $32, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,3]
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pmuludq %xmm2, %xmm3
-; SSE-NEXT: psrlq $32, %xmm1
-; SSE-NEXT: pmuludq %xmm2, %xmm1
-; SSE-NEXT: psllq $32, %xmm1
-; SSE-NEXT: paddq %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_const_v4i64_4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: movl $1, %eax
-; AVX1-NEXT: vmovq %rax, %xmm1
-; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
-; AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2,3]
+; AVX1-NEXT: vpmuludq %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
+; AVX1-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: movl $1, %eax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2959,20 +2935,16 @@ define <16 x i8> @trunc_mul_const_v16i16
define <4 x i32> @trunc_and_v4i64_4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_and_v4i64_4i32:
; SSE: # BB#0:
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: andps %xmm3, %xmm1
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_v4i64_4i32:
; AVX1: # BB#0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -3323,20 +3295,16 @@ define <4 x i32> @trunc_and_const_v4i64_
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: andps {{.*}}(%rip), %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_const_v4i64_4i32:
; AVX1: # BB#0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -3689,20 +3657,16 @@ define <16 x i8> @trunc_and_const_v16i16
define <4 x i32> @trunc_xor_v4i64_4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_xor_v4i64_4i32:
; SSE: # BB#0:
-; SSE-NEXT: pxor %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: xorps %xmm3, %xmm1
+; SSE-NEXT: xorps %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_v4i64_4i32:
; AVX1: # BB#0:
; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -4053,20 +4017,16 @@ define <4 x i32> @trunc_xor_const_v4i64_
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
-; SSE-NEXT: pxor %xmm0, %xmm2
-; SSE-NEXT: pxor {{.*}}(%rip), %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: xorps {{.*}}(%rip), %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_const_v4i64_4i32:
; AVX1: # BB#0:
; AVX1-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -4419,20 +4379,16 @@ define <16 x i8> @trunc_xor_const_v16i16
define <4 x i32> @trunc_or_v4i64_4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_or_v4i64_4i32:
; SSE: # BB#0:
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: orps %xmm3, %xmm1
+; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_v4i64_4i32:
; AVX1: # BB#0:
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -4783,20 +4739,16 @@ define <4 x i32> @trunc_or_const_v4i64_4
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: por {{.*}}(%rip), %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: orps {{.*}}(%rip), %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_const_v4i64_4i32:
; AVX1: # BB#0:
; AVX1-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -5149,64 +5101,60 @@ define <16 x i8> @trunc_or_const_v16i16_
define <4 x i32> @mul_add_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: mul_add_v4i64_v4i32:
; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrad $31, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrad $31, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: pmuludq %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: psrad $31, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pmuludq %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: psrlq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm0, %xmm5
+; SSE-NEXT: pmuludq %xmm2, %xmm5
; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: psrlq $32, %xmm2
+; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: psllq $32, %xmm2
+; SSE-NEXT: paddq %xmm5, %xmm2
+; SSE-NEXT: paddq %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm0, %xmm4
+; SSE-NEXT: psllq $32, %xmm4
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: psllq $32, %xmm0
-; SSE-NEXT: paddq %xmm5, %xmm0
; SSE-NEXT: paddq %xmm4, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: pmuludq %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: psllq $32, %xmm4
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pmuludq %xmm3, %xmm2
-; SSE-NEXT: psllq $32, %xmm2
-; SSE-NEXT: paddq %xmm4, %xmm2
-; SSE-NEXT: paddq %xmm1, %xmm2
-; SSE-NEXT: paddq {{.*}}(%rip), %xmm2
+; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: paddq {{.*}}(%rip), %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: paddq {{.*}}(%rip), %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: mul_add_v4i64_v4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmuldq %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: mul_add_v4i64_v4i32:
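
The trunc_*_v4i64_4i32 diffs above all reduce the same shape: some v4i64 arithmetic followed by a 64-to-32-bit truncation. A hypothetical standalone reproducer (not part of this patch) for that shape is simply:

; repro-trunc-add.ll -- hypothetical reproducer, not part of the patch.
; Try: llc < repro-trunc-add.ll -mtriple=x86_64-unknown-unknown -mattr=+sse2
define <4 x i32> @add_then_trunc(<4 x i64> %a, <4 x i64> %b) {
  %sum = add <4 x i64> %a, %b
  ; Truncating <4 x i64> to <4 x i32> keeps the even 32-bit lanes of each
  ; 128-bit half, which is exactly the selection a single shufps can make.
  %t = trunc <4 x i64> %sum to <4 x i32>
  ret <4 x i32> %t
}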
Modified: llvm/trunk/test/CodeGen/X86/vector-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc.ll Thu Dec 15 12:03:38 2016
@@ -10,46 +10,19 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BWVL
define <8 x i32> @trunc8i64_8i32(<8 x i64> %a) {
-; SSE2-LABEL: trunc8i64_8i32:
-; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: trunc8i64_8i32:
-; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: trunc8i64_8i32:
-; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; SSE41-NEXT: retq
+; SSE-LABEL: trunc8i64_8i32:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE-NEXT: movaps %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i32:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -690,46 +663,19 @@ entry:
}
define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
-; SSE2-LABEL: trunc2x4i64_8i32:
-; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: trunc2x4i64_8i32:
-; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: trunc2x4i64_8i32:
-; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; SSE41-NEXT: retq
+; SSE-LABEL: trunc2x4i64_8i32:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE-NEXT: movaps %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: trunc2x4i64_8i32:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -831,13 +777,9 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x
; AVX1-LABEL: trunc2x4i64_8i16:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
@@ -915,46 +857,19 @@ entry:
}
define <4 x i32> @trunc2x2i64_4i32(<2 x i64> %a, <2 x i64> %b) {
-; SSE2-LABEL: trunc2x2i64_4i32:
-; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: trunc2x2i64_4i32:
-; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: trunc2x2i64_4i32:
-; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: trunc2x2i64_4i32:
-; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: retq
+; SSE-LABEL: trunc2x2i64_4i32:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
;
-; AVX2-LABEL: trunc2x2i64_4i32:
-; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT: retq
+; AVX-LABEL: trunc2x2i64_4i32:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX-NEXT: retq
;
; AVX512-LABEL: trunc2x2i64_4i32:
; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512-NEXT: retq
entry:
%0 = trunc <2 x i64> %a to <2 x i32>
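
The trunc2x2i64_4i32 case above is the smallest instance: two <2 x i64> values are each truncated and the results concatenated. A hypothetical reproducer (not part of the patch) that should now come out as one shufps on the SSE, AVX, and AVX512 configurations checked above:

; repro-trunc2x2.ll -- hypothetical reproducer, not part of the patch.
define <4 x i32> @trunc_concat(<2 x i64> %a, <2 x i64> %b) {
  %ta = trunc <2 x i64> %a to <2 x i32>
  %tb = trunc <2 x i64> %b to <2 x i32>
  ; Concatenating the truncated halves keeps 32-bit lanes 0,2 of %a and
  ; 0,2 of %b, matching the shufps operand comments in the checks above.
  %r = shufflevector <2 x i32> %ta, <2 x i32> %tb, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %r
}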
Modified: llvm/trunk/test/CodeGen/X86/vsplit-and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vsplit-and.ll?rev=289837&r1=289836&r2=289837&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vsplit-and.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vsplit-and.ll Thu Dec 15 12:03:38 2016
@@ -26,32 +26,28 @@ define void @t2(<3 x i64>* %dst, <3 x i6
; CHECK-NEXT: movd %r9, %xmm1
; CHECK-NEXT: movd %r8, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; CHECK-NEXT: movd %rdx, %xmm1
-; CHECK-NEXT: movd %rsi, %xmm2
-; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; CHECK-NEXT: movd %rcx, %xmm1
+; CHECK-NEXT: movd %rdx, %xmm2
+; CHECK-NEXT: movd %rsi, %xmm1
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-NEXT: movd %rcx, %xmm2
; CHECK-NEXT: movq {{.*#+}} xmm3 = mem[0],zero
; CHECK-NEXT: pxor %xmm4, %xmm4
-; CHECK-NEXT: pcmpeqq %xmm4, %xmm1
-; CHECK-NEXT: pcmpeqd %xmm5, %xmm5
-; CHECK-NEXT: pxor %xmm5, %xmm1
-; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; CHECK-NEXT: pcmpeqq %xmm4, %xmm2
+; CHECK-NEXT: pcmpeqd %xmm5, %xmm5
; CHECK-NEXT: pxor %xmm5, %xmm2
-; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; CHECK-NEXT: pslld $31, %xmm2
-; CHECK-NEXT: psrad $31, %xmm2
+; CHECK-NEXT: pcmpeqq %xmm4, %xmm1
+; CHECK-NEXT: pxor %xmm5, %xmm1
+; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; CHECK-NEXT: pslld $31, %xmm1
+; CHECK-NEXT: psrad $31, %xmm1
; CHECK-NEXT: pcmpeqq %xmm4, %xmm3
; CHECK-NEXT: pxor %xmm5, %xmm3
-; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,0,2]
; CHECK-NEXT: pcmpeqq %xmm4, %xmm0
; CHECK-NEXT: pxor %xmm5, %xmm0
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; CHECK-NEXT: pslld $31, %xmm0
; CHECK-NEXT: psrad $31, %xmm0
-; CHECK-NEXT: pand %xmm2, %xmm0
+; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
; CHECK-NEXT: psllq $63, %xmm1
; CHECK-NEXT: psrad $31, %xmm1
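
The vsplit-and.ll change follows the same theme: two pcmpeqq results are narrowed and combined, and the pshufd/pblendw pair becomes one shufps feeding the pslld/psrad sign-extension. A hypothetical reduced reproducer (not part of the patch) that should lower along the same lines:

; repro-vsplit-and.ll -- hypothetical reproducer, not part of the patch.
define <4 x i32> @cmp_concat(<2 x i64> %a, <2 x i64> %b) {
  %pa = icmp ne <2 x i64> %a, zeroinitializer
  %pb = icmp ne <2 x i64> %b, zeroinitializer
  ; Widen each predicate to 32 bits and concatenate; the narrowing
  ; shuffle is the part that can now become a single shufps.
  %sa = sext <2 x i1> %pa to <2 x i32>
  %sb = sext <2 x i1> %pb to <2 x i32>
  %r = shufflevector <2 x i32> %sa, <2 x i32> %sb, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %r
}

Many of these X86 tests appear to carry autogenerated assertions, so when verifying a local build it is likely easier to regenerate the CHECK lines with utils/update_llc_test_checks.py than to edit them by hand.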