[llvm] a640f16 - [X86] combineAnd - don't demand operand vector elements if the other operand element is zero
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 16 08:55:15 PST 2021
Author: Simon Pilgrim
Date: 2021-12-16T16:54:27Z
New Revision: a640f16ca2da46bb5fcd522d2130234a41e157e9
URL: https://github.com/llvm/llvm-project/commit/a640f16ca2da46bb5fcd522d2130234a41e157e9
DIFF: https://github.com/llvm/llvm-project/commit/a640f16ca2da46bb5fcd522d2130234a41e157e9.diff
LOG: [X86] combineAnd - don't demand operand vector elements if the other operand element is zero
If either operand has a zero element, then we don't need the corresponding element from the other operand, as no bits will be set.
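
For illustration only (not part of the commit): a minimal standalone C++ sketch of the demanded-elements idea, using a made-up 4 x i32 constant mask. Lanes where the constant mask is zero force the AND result to zero, so those lanes of the other operand are never demanded; in the actual patch, SimplifyDemandedVectorElts is then called on the other operand with that demanded-elements mask.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical constant-mask operand of a 4 x i32 AND.
  std::vector<uint32_t> Mask = {0xFFFFFFFFu, 0x0u, 0x000000FFu, 0x0u};

  // Only lanes with a non-zero mask are demanded from the other operand;
  // zero lanes produce zero regardless of the other operand's value.
  std::vector<bool> DemandedElts(Mask.size());
  for (std::size_t I = 0; I != Mask.size(); ++I)
    DemandedElts[I] = (Mask[I] != 0);

  for (std::size_t I = 0; I != Mask.size(); ++I)
    std::cout << "lane " << I << ": "
              << (DemandedElts[I] ? "demanded" : "not demanded") << '\n';
  return 0;
}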
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll
llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
llvm/test/CodeGen/X86/oddshuffles.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
llvm/test/CodeGen/X86/vector-partial-undef.ll
llvm/test/CodeGen/X86/vector-trunc-math.ll
llvm/test/CodeGen/X86/vector-trunc-packus.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8e72ce75140f..9b29aaea15dc 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -46322,11 +46322,36 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
return R;
- // Attempt to recursively combine a bitmask AND with shuffles.
if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
+ // Attempt to recursively combine a bitmask AND with shuffles.
SDValue Op(N, 0);
if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
return Res;
+
+ // If either operand is a constant mask, then only the elements that aren't
+ // zero are actually demanded by the other operand.
+ auto SimplifyUndemandedElts = [&](SDValue Op, SDValue OtherOp) {
+ APInt UndefElts;
+ SmallVector<APInt> EltBits;
+ int NumElts = VT.getVectorNumElements();
+ int EltSizeInBits = VT.getScalarSizeInBits();
+ if (!getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts, EltBits))
+ return false;
+
+ APInt DemandedElts = APInt::getZero(NumElts);
+ for (int I = 0; I != NumElts; ++I)
+ if (!EltBits[I].isZero())
+ DemandedElts.setBit(I);
+
+ APInt KnownUndef, KnownZero;
+ return TLI.SimplifyDemandedVectorElts(OtherOp, DemandedElts, KnownUndef,
+ KnownZero, DCI);
+ };
+ if (SimplifyUndemandedElts(N0, N1) || SimplifyUndemandedElts(N1, N0)) {
+ if (N->getOpcode() != ISD::DELETED_NODE)
+ DCI.AddToWorklist(N);
+ return SDValue(N, 0);
+ }
}
// Attempt to combine a scalar bitmask AND with an extracted shuffle.
diff --git a/llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll
index d17b677276b9..5307fecbf684 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-cvt-ph-w-vl-intrinsics.ll
@@ -739,8 +739,7 @@ define <2 x half> @test_s8tofp2(<2 x i8> %arg0) {
define <2 x half> @test_u1tofp2(<2 x i1> %arg0) {
; CHECK-LABEL: test_u1tofp2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; CHECK-NEXT: vpmovqw %xmm0, %xmm0
; CHECK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vcvtuw2ph %xmm0, %xmm0
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index 86b20c249d8c..0b038d4d383f 100644
--- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -613,7 +613,6 @@ define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -639,7 +638,6 @@ define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm0
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index e52cf736aa1f..fe9d52d4c2e1 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -840,9 +840,8 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; SSE2-NEXT: pandn %xmm5, %xmm4
; SSE2-NEXT: por %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,1,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE2-NEXT: packuswb %xmm1, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,255,0,255,255,0,255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
@@ -1087,9 +1086,8 @@ define void @interleave_24i16_out_reverse(<24 x i16>* %p, <8 x i16>* %q1, <8 x i
; SSE2-NEXT: por %xmm5, %xmm6
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,1,0,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,6,6,6]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
; SSE2-NEXT: pand %xmm6, %xmm5
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm8[0,1,2,3,4,7,6,7]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index 9154c4bf3084..386fac87aa31 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -838,11 +838,11 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: pandn %xmm1, %xmm5
-; SSE-NEXT: pand %xmm6, %xmm0
-; SSE-NEXT: por %xmm0, %xmm5
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm5, %xmm6
+; SSE-NEXT: pandn %xmm1, %xmm6
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: por %xmm0, %xmm6
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; SSE-NEXT: pandn %xmm9, %xmm12
; SSE-NEXT: por %xmm12, %xmm7
@@ -856,23 +856,23 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: movdqa %xmm4, %xmm11
; SSE-NEXT: pandn %xmm0, %xmm11
-; SSE-NEXT: pand %xmm4, %xmm5
-; SSE-NEXT: por %xmm5, %xmm11
+; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: por %xmm6, %xmm11
; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; SSE-NEXT: movdqa %xmm15, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm0[0,2]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[3,1,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3],xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
+; SSE-NEXT: movdqa %xmm15, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm0[3,0]
+; SSE-NEXT: movaps %xmm0, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm15[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm15[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,4,5]
-; SSE-NEXT: packuswb %xmm0, %xmm5
+; SSE-NEXT: packuswb %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3],xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
@@ -881,14 +881,14 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm1, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm5
-; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pandn %xmm1, %xmm6
-; SSE-NEXT: por %xmm5, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [0,65535,65535,0,65535,65535,65535,65535]
+; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: por %xmm2, %xmm6
+; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: packuswb %xmm6, %xmm6
+; SSE-NEXT: pandn %xmm6, %xmm5
+; SSE-NEXT: por %xmm1, %xmm5
; SSE-NEXT: movdqa %xmm7, %xmm0
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
@@ -901,10 +901,10 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: pand %xmm4, %xmm5
; SSE-NEXT: packuswb %xmm2, %xmm0
; SSE-NEXT: pandn %xmm0, %xmm4
-; SSE-NEXT: por %xmm6, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm6, %xmm0
@@ -973,18 +973,17 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm2, %xmm6
; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm13[8],xmm1[9],xmm13[9],xmm1[10],xmm13[10],xmm1[11],xmm13[11],xmm1[12],xmm13[12],xmm1[13],xmm13[13],xmm1[14],xmm13[14],xmm1[15],xmm13[15]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,7,4]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm13[8],xmm12[9],xmm13[9],xmm12[10],xmm13[10],xmm12[11],xmm13[11],xmm12[12],xmm13[12],xmm12[13],xmm13[13],xmm12[14],xmm13[14],xmm12[15],xmm13[15]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,0,0]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm12[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
-; SSE-NEXT: pand %xmm2, %xmm5
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: por %xmm5, %xmm2
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm12[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,7,4]
+; SSE-NEXT: pandn %xmm5, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: pand %xmm0, %xmm6
; SSE-NEXT: packuswb %xmm2, %xmm1
; SSE-NEXT: pandn %xmm1, %xmm0
@@ -1529,7 +1528,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pandn %xmm8, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm4, (%rsp) # 16-byte Spill
; SSE-NEXT: pand %xmm2, %xmm13
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pandn %xmm14, %xmm4
@@ -1561,7 +1560,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm2, %xmm10
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1635,10 +1634,10 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pand %xmm7, %xmm6
; SSE-NEXT: por %xmm2, %xmm6
; SSE-NEXT: packuswb %xmm6, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm9, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm11, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: pand %xmm9, %xmm5
+; SSE-NEXT: pand %xmm11, %xmm5
; SSE-NEXT: por %xmm5, %xmm6
; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm1, %xmm2
@@ -1679,9 +1678,9 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pandn %xmm0, %xmm7
; SSE-NEXT: por %xmm1, %xmm7
; SSE-NEXT: packuswb %xmm7, %xmm0
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm11, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pand %xmm9, %xmm15
+; SSE-NEXT: pand %xmm11, %xmm15
; SSE-NEXT: por %xmm15, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
@@ -1696,7 +1695,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
+; SSE-NEXT: por (%rsp), %xmm13 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm13, %xmm1
; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
@@ -1710,9 +1709,8 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pandn %xmm11, %xmm0
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: por %xmm0, %xmm14
; SSE-NEXT: movdqa %xmm14, %xmm0
; SSE-NEXT: pand %xmm12, %xmm0
@@ -1722,9 +1720,9 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm11, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: pand %xmm11, %xmm3
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm5, %xmm8
@@ -1738,9 +1736,9 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm6, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm9, %xmm2
; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
@@ -1752,7 +1750,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: por %xmm0, %xmm7
@@ -1764,25 +1762,25 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm11, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: pand %xmm11, %xmm3
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm10[8],xmm0[9],xmm10[9],xmm0[10],xmm10[10],xmm0[11],xmm10[11],xmm0[12],xmm10[12],xmm0[13],xmm10[13],xmm0[14],xmm10[14],xmm0[15],xmm10[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15]
; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0,2]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[3,1,2,3,4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
; SSE-NEXT: packuswb %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm13, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
@@ -1816,31 +1814,31 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm9, %xmm15
+; SSE-NEXT: movdqa %xmm11, %xmm15
; SSE-NEXT: pandn %xmm0, %xmm15
-; SSE-NEXT: pand %xmm9, %xmm5
+; SSE-NEXT: pand %xmm11, %xmm5
; SSE-NEXT: por %xmm5, %xmm15
; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm10[8],xmm0[9],xmm10[9],xmm0[10],xmm10[10],xmm0[11],xmm10[11],xmm0[12],xmm10[12],xmm0[13],xmm10[13],xmm0[14],xmm10[14],xmm0[15],xmm10[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm10[8],xmm8[9],xmm10[9],xmm8[10],xmm10[10],xmm8[11],xmm10[11],xmm8[12],xmm10[12],xmm8[13],xmm10[13],xmm8[14],xmm10[14],xmm8[15],xmm10[15]
; SSE-NEXT: movdqa %xmm8, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm8[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[0,2]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[3,1,2,3,4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[3,0]
+; SSE-NEXT: movaps %xmm0, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm8[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm8[2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm8[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,4,5]
; SSE-NEXT: packuswb %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm10[8],xmm6[9],xmm10[9],xmm6[10],xmm10[10],xmm6[11],xmm10[11],xmm6[12],xmm10[12],xmm6[13],xmm10[13],xmm6[14],xmm10[14],xmm6[15],xmm10[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
; SSE-NEXT: pand %xmm3, %xmm6
@@ -1861,11 +1859,11 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm9, %xmm14
-; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: pand %xmm11, %xmm1
; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm0, %xmm14
-; SSE-NEXT: por %xmm1, %xmm14
+; SSE-NEXT: pandn %xmm0, %xmm11
+; SSE-NEXT: por %xmm1, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm14
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm4, %xmm0
@@ -1889,7 +1887,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: packuswb %xmm2, %xmm2
; SSE-NEXT: pand %xmm9, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,0,65535,65535,0,65535,65535,0]
; SSE-NEXT: pand %xmm11, %xmm1
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -1974,68 +1972,66 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: packuswb %xmm7, %xmm7
; SSE-NEXT: pand %xmm9, %xmm7
; SSE-NEXT: por %xmm6, %xmm7
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,5,5,7,4]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535,0,65535,0,0]
-; SSE-NEXT: movdqa %xmm3, %xmm6
-; SSE-NEXT: pandn %xmm4, %xmm6
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,3,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,5,5,7,4]
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,0,65535,0,0]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm6, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,1,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
-; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: por %xmm6, %xmm4
-; SSE-NEXT: packuswb %xmm4, %xmm4
+; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: por %xmm4, %xmm3
+; SSE-NEXT: packuswb %xmm3, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: pandn %xmm4, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: pand %xmm0, %xmm7
; SSE-NEXT: por %xmm7, %xmm6
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm7, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSE-NEXT: movdqa %xmm7, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm3[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm3[2,3]
+; SSE-NEXT: psrlq $48, %xmm3
+; SSE-NEXT: psrldq {{.*#+}} xmm4 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,7]
+; SSE-NEXT: packuswb %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm8, %xmm4
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
-; SSE-NEXT: movdqa %xmm1, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm4[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm4[2,3]
-; SSE-NEXT: psrlq $48, %xmm4
-; SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,5,7]
-; SSE-NEXT: packuswb %xmm7, %xmm4
-; SSE-NEXT: movdqa %xmm8, %xmm7
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm10[8],xmm7[9],xmm10[9],xmm7[10],xmm10[10],xmm7[11],xmm10[11],xmm7[12],xmm10[12],xmm7[13],xmm10[13],xmm7[14],xmm10[14],xmm7[15],xmm10[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,1,2,4,5,6,7]
-; SSE-NEXT: pand %xmm5, %xmm1
-; SSE-NEXT: pandn %xmm7, %xmm5
-; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm8[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,1,1,2,4,5,6,7]
+; SSE-NEXT: pand %xmm5, %xmm7
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: por %xmm7, %xmm5
; SSE-NEXT: packuswb %xmm5, %xmm5
; SSE-NEXT: pand %xmm9, %xmm5
-; SSE-NEXT: pandn %xmm4, %xmm9
+; SSE-NEXT: pandn %xmm3, %xmm9
; SSE-NEXT: por %xmm9, %xmm5
-; SSE-NEXT: movdqa %xmm11, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,7,4]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
-; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: por %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm11, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3],xmm3[4],xmm10[4],xmm3[5],xmm10[5],xmm3[6],xmm10[6],xmm3[7],xmm10[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm10[8],xmm11[9],xmm10[9],xmm11[10],xmm10[10],xmm11[11],xmm10[11],xmm11[12],xmm10[12],xmm11[13],xmm10[13],xmm11[14],xmm10[14],xmm11[15],xmm10[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm11[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,7,4]
+; SSE-NEXT: pandn %xmm4, %xmm1
+; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: pand %xmm0, %xmm5
-; SSE-NEXT: packuswb %xmm3, %xmm1
+; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index e06b1faeae67..f639eae42eff 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -919,7 +919,7 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,6,7,4,5,6,7,8,9,4,5,10,11,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,6,7,u,u,u,u,8,9,4,5,10,11,u,u]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,6]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
index cb541ea89f35..f8390f581157 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
@@ -149,9 +149,8 @@ define void @store_i8_stride3_vf8(<8 x i8>* %in.vecptr0, <8 x i8>* %in.vecptr1,
; SSE-NEXT: pandn %xmm5, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,1,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,255,0,255,255,0,255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm2, %xmm1
@@ -233,15 +232,15 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
; SSE-NEXT: movdqa (%rdx), %xmm8
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
+; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm2[0,1,2,3,4,5,5,6]
-; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: pand %xmm0, %xmm6
; SSE-NEXT: por %xmm3, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE-NEXT: pand %xmm2, %xmm6
@@ -250,43 +249,42 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pandn %xmm7, %xmm3
; SSE-NEXT: por %xmm6, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm6
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,1,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,2,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm7
+; SSE-NEXT: pandn %xmm6, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,6,6]
-; SSE-NEXT: pand %xmm4, %xmm6
-; SSE-NEXT: movdqa %xmm1, %xmm7
-; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3],xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,4,7]
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pandn %xmm7, %xmm0
+; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pand %xmm2, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pandn %xmm7, %xmm4
+; SSE-NEXT: por %xmm6, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm8[2,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,7,7]
+; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm5, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm8[0,1,0,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,0,0,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,6,6]
-; SSE-NEXT: movdqa %xmm2, %xmm7
-; SSE-NEXT: pandn %xmm6, %xmm7
-; SSE-NEXT: por %xmm0, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,7,7]
-; SSE-NEXT: pand %xmm4, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm4
-; SSE-NEXT: por %xmm5, %xmm4
-; SSE-NEXT: pand %xmm2, %xmm4
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,6,7]
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: por %xmm4, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, 32(%rcx)
-; SSE-NEXT: movdqa %xmm7, (%rcx)
+; SSE-NEXT: movdqa %xmm4, (%rcx)
; SSE-NEXT: movdqa %xmm3, 16(%rcx)
; SSE-NEXT: retq
;
@@ -350,126 +348,124 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
define void @store_i8_stride3_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr1, <32 x i8>* %in.vecptr2, <96 x i8>* %out.vec) nounwind {
; SSE-LABEL: store_i8_stride3_vf32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa (%rdi), %xmm8
+; SSE-NEXT: movdqa (%rdi), %xmm9
; SSE-NEXT: movdqa 16(%rdi), %xmm11
-; SSE-NEXT: movdqa (%rsi), %xmm12
-; SSE-NEXT: movdqa 16(%rsi), %xmm13
-; SSE-NEXT: movdqa (%rdx), %xmm9
+; SSE-NEXT: movdqa (%rsi), %xmm13
+; SSE-NEXT: movdqa 16(%rsi), %xmm7
+; SSE-NEXT: movdqa (%rdx), %xmm8
; SSE-NEXT: movdqa 16(%rdx), %xmm10
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,1,2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,6]
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm11[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,4,4,6,5]
+; SSE-NEXT: movdqa %xmm4, %xmm12
+; SSE-NEXT: pandn %xmm6, %xmm12
+; SSE-NEXT: por %xmm2, %xmm12
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[2,1,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm2, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,5,5,6]
-; SSE-NEXT: pand %xmm6, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
-; SSE-NEXT: pand %xmm7, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm11[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
-; SSE-NEXT: movdqa %xmm7, %xmm14
-; SSE-NEXT: pandn %xmm5, %xmm14
-; SSE-NEXT: por %xmm4, %xmm14
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm9[2,1,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: pandn %xmm4, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm12[2,1,2,3]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,5,6]
-; SSE-NEXT: pand %xmm6, %xmm4
-; SSE-NEXT: por %xmm5, %xmm4
-; SSE-NEXT: pand %xmm7, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,6,5]
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: pandn %xmm5, %xmm1
-; SSE-NEXT: por %xmm4, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[0,1,0,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6]
-; SSE-NEXT: pand %xmm6, %xmm4
-; SSE-NEXT: movdqa %xmm13, %xmm5
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1],xmm5[2],xmm13[2],xmm5[3],xmm13[3],xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,4,7]
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: pandn %xmm5, %xmm0
-; SSE-NEXT: por %xmm4, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm10[0,1,0,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm4[0,1,2,3,5,5,6,6]
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: pandn %xmm5, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[0,1,0,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,6]
-; SSE-NEXT: pand %xmm6, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm5
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,4,7]
-; SSE-NEXT: movdqa %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm5, %xmm3
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,1,0,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,6]
-; SSE-NEXT: movdqa %xmm7, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm5
-; SSE-NEXT: por %xmm3, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
-; SSE-NEXT: movdqa %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
-; SSE-NEXT: pand %xmm6, %xmm0
-; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,3,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,6]
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,4,4,6,5]
+; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: pandn %xmm1, %xmm6
+; SSE-NEXT: por %xmm2, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[2,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,5,6,6,7]
+; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,1,1,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,2,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,1,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,5,5,6,6]
+; SSE-NEXT: pand %xmm0, %xmm7
+; SSE-NEXT: por %xmm3, %xmm7
+; SSE-NEXT: pand %xmm4, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,1,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,5,5,6,6]
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: por %xmm7, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,7,7,7]
+; SSE-NEXT: movdqa %xmm0, %xmm7
+; SSE-NEXT: pandn %xmm3, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[2,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,7,7]
-; SSE-NEXT: pand %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: por %xmm3, %xmm6
-; SSE-NEXT: pand %xmm7, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,6,7]
-; SSE-NEXT: pandn %xmm0, %xmm7
-; SSE-NEXT: por %xmm6, %xmm7
-; SSE-NEXT: movdqa %xmm7, 32(%rcx)
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm13, %xmm7
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,3,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,6,6,7]
+; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: pandn %xmm7, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[0,1,1,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm9[0,1,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,2,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
+; SSE-NEXT: pand %xmm0, %xmm7
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: por %xmm7, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,1,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,6]
+; SSE-NEXT: pandn %xmm3, %xmm4
+; SSE-NEXT: por %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm4, (%rcx)
+; SSE-NEXT: movdqa %xmm5, 32(%rcx)
+; SSE-NEXT: movdqa %xmm1, 48(%rcx)
; SSE-NEXT: movdqa %xmm2, 80(%rcx)
-; SSE-NEXT: movdqa %xmm5, (%rcx)
-; SSE-NEXT: movdqa %xmm4, 48(%rcx)
-; SSE-NEXT: movdqa %xmm1, 16(%rcx)
-; SSE-NEXT: movdqa %xmm14, 64(%rcx)
+; SSE-NEXT: movdqa %xmm6, 16(%rcx)
+; SSE-NEXT: movdqa %xmm12, 64(%rcx)
; SSE-NEXT: retq
;
; AVX1-LABEL: store_i8_stride3_vf32:
diff --git a/llvm/test/CodeGen/X86/vector-partial-undef.ll b/llvm/test/CodeGen/X86/vector-partial-undef.ll
index 21d82efbe610..5b6437592f84 100644
--- a/llvm/test/CodeGen/X86/vector-partial-undef.ll
+++ b/llvm/test/CodeGen/X86/vector-partial-undef.ll
@@ -85,9 +85,7 @@ define <4 x i64> @and_undef_elts(<2 x i64> %x) {
;
; AVX-LABEL: and_undef_elts:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,1,2]
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%extend = shufflevector <2 x i64> %x, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%bogus_bo = and <4 x i64> %extend, <i64 undef, i64 undef, i64 42, i64 43>
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll
index 1d610f9f0b60..3f16cfd6298c 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll
@@ -2940,7 +2940,7 @@ define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX2-FAST-ALL-LABEL: trunc_and_const_v4i64_v4i32:
; AVX2-FAST-ALL: # %bb.0:
-; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm1 = <0,2,4,6,u,u,u,u>
+; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm1 = <u,2,4,6,u,u,u,u>
; AVX2-FAST-ALL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FAST-ALL-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-FAST-ALL-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-trunc-packus.ll b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
index 9ab03ce654e8..3c67adfac947 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-packus.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
@@ -2730,10 +2730,9 @@ define <2 x i8> @trunc_packus_v2i64_v2i8(<2 x i64> %a0) {
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -2881,10 +2880,9 @@ define void @trunc_packus_v2i64_v2i8_store(<2 x i64> %a0, <2 x i8> *%p1) {
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: pand %xmm3, %xmm1
@@ -3054,20 +3052,18 @@ define <4 x i8> @trunc_packus_v4i64_v4i8(<4 x i64> %a0) {
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm4, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
@@ -3312,20 +3308,18 @@ define void @trunc_packus_v4i64_v4i8_store(<4 x i64> %a0, <4 x i8> *%p1) {
; SSE2-NEXT: pxor %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm4, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3