[llvm] 88ff6f7 - [X86] Extend vselect(cond, pshufb(x), pshufb(y)) -> or(pshufb(x), pshufb(y)) to include inner or(pshufb(x), pshufb(y)) chains

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sun Apr 10 05:05:17 PDT 2022


Author: Simon Pilgrim
Date: 2022-04-10T13:04:53+01:00
New Revision: 88ff6f70c45f2767576c64dde28cbfe7a90916ca

URL: https://github.com/llvm/llvm-project/commit/88ff6f70c45f2767576c64dde28cbfe7a90916ca
DIFF: https://github.com/llvm/llvm-project/commit/88ff6f70c45f2767576c64dde28cbfe7a90916ca.diff

LOG: [X86] Extend vselect(cond, pshufb(x), pshufb(y)) -> or(pshufb(x), pshufb(y)) to include inner or(pshufb(x), pshufb(y)) chains

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d74a8e72bbe89..5f313efe590f4 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -43960,6 +43960,13 @@ static SDValue combineLogicBlendIntoConditionalNegate(
   return DAG.getBitcast(VT, Res);
 }
 
+// Is this a PSHUFB or an OR(PSHUFB,PSHUFB) chain?
+static bool isPSHUFBOrChain(SDValue N) {
+  return N.getOpcode() == X86ISD::PSHUFB ||
+         (N.getOpcode() == ISD::OR && isPSHUFBOrChain(N.getOperand(0)) &&
+          isPSHUFBOrChain(N.getOperand(1)));
+}
+
 /// Do target-specific dag combines on SELECT and VSELECT nodes.
 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI,
@@ -44003,15 +44010,26 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
   // by forcing the unselected elements to zero.
   // TODO: Can we handle more shuffles with this?
   if (N->getOpcode() == ISD::VSELECT && CondVT.isVector() &&
-      LHS.getOpcode() == X86ISD::PSHUFB && RHS.getOpcode() == X86ISD::PSHUFB &&
+      isPSHUFBOrChain(LHS) && isPSHUFBOrChain(RHS) &&
       LHS.hasOneUse() && RHS.hasOneUse()) {
-    MVT SimpleVT = VT.getSimpleVT();
-    SmallVector<SDValue, 1> LHSOps, RHSOps;
-    SmallVector<int, 64> LHSMask, RHSMask, CondMask;
-    if (createShuffleMaskFromVSELECT(CondMask, Cond) &&
-        getTargetShuffleMask(LHS.getNode(), SimpleVT, true, LHSOps, LHSMask) &&
-        getTargetShuffleMask(RHS.getNode(), SimpleVT, true, RHSOps, RHSMask)) {
+    SmallVector<int, 64> CondMask;
+    if (createShuffleMaskFromVSELECT(CondMask, Cond)) {
+      MVT SimpleVT = VT.getSimpleVT();
       int NumElts = VT.getVectorNumElements();
+      SmallVector<SDValue, 1> LHSOps, RHSOps;
+      SmallVector<int, 64> LHSMask, RHSMask;
+      if (LHS.getOpcode() != X86ISD::PSHUFB ||
+          !getTargetShuffleMask(LHS.getNode(), SimpleVT, true, LHSOps, LHSMask)) {
+        LHSOps.assign({LHS});
+        LHSMask.resize(NumElts);
+        std::iota(LHSMask.begin(), LHSMask.end(), 0);
+      }
+      if (RHS.getOpcode() != X86ISD::PSHUFB ||
+          !getTargetShuffleMask(RHS.getNode(), SimpleVT, true, RHSOps, RHSMask)) {
+        RHSOps.assign({RHS});
+        RHSMask.resize(NumElts);
+        std::iota(RHSMask.begin(), RHSMask.end(), 0);
+      }
       for (int i = 0; i != NumElts; ++i) {
         // getConstVector sets negative shuffle mask values as undef, so ensure
         // we hardcode SM_SentinelZero values to zero (0x80).
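
To make the shape of the change easier to follow, here is a small standalone sketch of the two pieces the patch adds: the recursive check that accepts either a lone PSHUFB or an OR whose operands are themselves PSHUFB/OR chains, and the identity-mask fallback used in combineSelect() when an operand is not a plain PSHUFB (the unselected lanes are then forced to zero by the existing mask loop). The Node/Op types below are illustrative stand-ins for SDValue and the real ISD/X86ISD opcodes, not LLVM code:

#include <cstdio>
#include <numeric>
#include <vector>

// Stand-ins for ISD::OR / X86ISD::PSHUFB and SDValue (illustration only).
enum class Op { PSHUFB, OR, OTHER };
struct Node {
  Op Opcode;
  std::vector<const Node *> Operands;
};

// Mirrors the new isPSHUFBOrChain(): a PSHUFB, or an OR of two such chains.
static bool isPshufbOrChain(const Node &N) {
  if (N.Opcode == Op::PSHUFB)
    return true;
  return N.Opcode == Op::OR && N.Operands.size() == 2 &&
         isPshufbOrChain(*N.Operands[0]) && isPshufbOrChain(*N.Operands[1]);
}

// Mirrors the fallback in combineSelect(): an operand that is not a plain
// PSHUFB (e.g. an inner OR(PSHUFB,PSHUFB) chain) is kept as a single source
// with an identity shuffle mask, which the existing code then combines with
// the VSELECT condition mask.
static std::vector<int> identityMask(int NumElts) {
  std::vector<int> Mask(NumElts);
  std::iota(Mask.begin(), Mask.end(), 0);
  return Mask;
}

int main() {
  Node X{Op::PSHUFB, {}}, Y{Op::PSHUFB, {}}, Other{Op::OTHER, {}};
  Node Chain{Op::OR, {&X, &Y}};
  std::printf("pshufb:   %d\n", isPshufbOrChain(X));     // 1
  std::printf("or chain: %d\n", isPshufbOrChain(Chain)); // 1
  std::printf("other:    %d\n", isPshufbOrChain(Other)); // 0
  std::printf("identity mask size: %zu\n", identityMask(16).size()); // 16
}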

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index 54c12589b9439..7274673f1093e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -1086,66 +1086,49 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
 ; AVX2-SLOW-LABEL: load_i8_stride6_vf16:
 ; AVX2-SLOW:       # %bb.0:
 ; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm0, %ymm8, %ymm4, %ymm5
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm2
-; AVX2-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm0
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[4,10]
-; AVX2-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm1
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
-; AVX2-SLOW-NEXT:    vpor %xmm7, %xmm3, %xmm3
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %xmm11, %xmm2, %xmm3, %xmm9
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,11]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
-; AVX2-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX2-SLOW-NEXT:    vpblendvb %xmm11, %xmm3, %xmm5, %xmm10
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm4, %ymm8, %ymm3
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm6
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,6,12]
-; AVX2-SLOW-NEXT:    vpor %xmm5, %xmm7, %xmm5
-; AVX2-SLOW-NEXT:    vpblendvb %xmm11, %xmm2, %xmm5, %xmm12
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,7,13]
-; AVX2-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX2-SLOW-NEXT:    vpblendvb %xmm11, %xmm3, %xmm5, %xmm3
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm4, %ymm8, %ymm4
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm5[0,6,12],zero,zero,zero,xmm5[4,10,u,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm4[4,10],zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[u,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpor %xmm6, %xmm7, %xmm6
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[2,8,14]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
-; AVX2-SLOW-NEXT:    vpor %xmm7, %xmm2, %xmm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[1,7,13],zero,zero,zero,xmm5[5,11,u,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[5,11],zero,zero,zero,xmm4[3,9,15],zero,zero,xmm4[u,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[3,9,15]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
-; AVX2-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm9, (%rsi)
-; AVX2-SLOW-NEXT:    vmovdqa %xmm10, (%rdx)
-; AVX2-SLOW-NEXT:    vmovdqa %xmm12, (%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %xmm3, (%r8)
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm2
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[0,6,12],zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[2,8,14],zero,zero
+; AVX2-SLOW-NEXT:    vpor %xmm5, %xmm3, %xmm8
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[1,7,13],zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[3,9,15],zero,zero
+; AVX2-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm9
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %xmm6, 32(%rdi), %xmm5, %xmm5
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[2,8,14],zero,zero,xmm5[0,6,12],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm7
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[0,6,12]
+; AVX2-SLOW-NEXT:    vpor %xmm3, %xmm6, %xmm10
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[3,9,15],zero,zero,xmm5[1,7,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[1,7,13]
+; AVX2-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[4,10,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[2,8,14]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX2-SLOW-NEXT:    vpor %xmm6, %xmm3, %xmm3
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[5,11,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[3,9,15]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX2-SLOW-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm8, (%rsi)
+; AVX2-SLOW-NEXT:    vmovdqa %xmm9, (%rdx)
+; AVX2-SLOW-NEXT:    vmovdqa %xmm10, (%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %xmm5, (%r8)
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm2, (%r9)
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
 ; AVX2-SLOW-NEXT:    vzeroupper
@@ -1154,66 +1137,49 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
 ; AVX2-FAST-LABEL: load_i8_stride6_vf16:
 ; AVX2-FAST:       # %bb.0:
 ; AVX2-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm4
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm8, %ymm4, %ymm5
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm2
-; AVX2-FAST-NEXT:    vmovdqa 80(%rdi), %xmm0
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[4,10]
-; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %xmm1
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
-; AVX2-FAST-NEXT:    vpor %xmm7, %xmm3, %xmm3
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %xmm11, %xmm2, %xmm3, %xmm9
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,11]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
-; AVX2-FAST-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX2-FAST-NEXT:    vpblendvb %xmm11, %xmm3, %xmm5, %xmm10
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm3, %ymm4, %ymm8, %ymm3
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm6
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,6,12]
-; AVX2-FAST-NEXT:    vpor %xmm5, %xmm7, %xmm5
-; AVX2-FAST-NEXT:    vpblendvb %xmm11, %xmm2, %xmm5, %xmm12
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,7,13]
-; AVX2-FAST-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX2-FAST-NEXT:    vpblendvb %xmm11, %xmm3, %xmm5, %xmm3
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm4, %ymm8, %ymm4
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm5[0,6,12],zero,zero,zero,xmm5[4,10,u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm4[4,10],zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpor %xmm6, %xmm7, %xmm6
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[2,8,14]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
-; AVX2-FAST-NEXT:    vpor %xmm7, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[1,7,13],zero,zero,zero,xmm5[5,11,u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[5,11],zero,zero,zero,xmm4[3,9,15],zero,zero,xmm4[u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm4
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[3,9,15]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
-; AVX2-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqa %xmm9, (%rsi)
-; AVX2-FAST-NEXT:    vmovdqa %xmm10, (%rdx)
-; AVX2-FAST-NEXT:    vmovdqa %xmm12, (%rcx)
-; AVX2-FAST-NEXT:    vmovdqa %xmm3, (%r8)
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm2
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[0,6,12],zero,zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[2,8,14],zero,zero
+; AVX2-FAST-NEXT:    vpor %xmm5, %xmm3, %xmm8
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[1,7,13],zero,zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[3,9,15],zero,zero
+; AVX2-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm9
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-FAST-NEXT:    vpblendvb %xmm6, 32(%rdi), %xmm5, %xmm5
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[2,8,14],zero,zero,xmm5[0,6,12],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vmovdqa 80(%rdi), %xmm7
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[0,6,12]
+; AVX2-FAST-NEXT:    vpor %xmm3, %xmm6, %xmm10
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[3,9,15],zero,zero,xmm5[1,7,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[1,7,13]
+; AVX2-FAST-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
+; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[4,10,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[2,8,14]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX2-FAST-NEXT:    vpor %xmm6, %xmm3, %xmm3
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,7,13],zero,zero,zero,xmm1[5,11,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[3,9,15]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX2-FAST-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa %xmm8, (%rsi)
+; AVX2-FAST-NEXT:    vmovdqa %xmm9, (%rdx)
+; AVX2-FAST-NEXT:    vmovdqa %xmm10, (%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %xmm5, (%r8)
 ; AVX2-FAST-NEXT:    vmovdqa %xmm2, (%r9)
 ; AVX2-FAST-NEXT:    vmovdqa %xmm0, (%rax)
 ; AVX2-FAST-NEXT:    vzeroupper

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 8f09a2fdc769d..710b4ecc0f954 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -658,36 +658,26 @@ define void @store_i8_stride6_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm2
-; AVX2-SLOW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8],zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,ymm3[2,10],zero,zero,zero,zero,ymm3[19,27],zero,zero,zero,zero,ymm3[20,28],zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm2[0,2,0,2]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,ymm3[0,8],zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,ymm3[18,26],zero,zero,zero,zero,ymm3[19,27],zero,zero,zero,zero,ymm3[20,28],zero,zero
 ; AVX2-SLOW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,8],zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,ymm4[2,10],zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,ymm4[20,28],zero,zero,zero,zero,ymm4[21,29]
 ; AVX2-SLOW-NEXT:    vpor %ymm3, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,8,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,19,27,u,u,u,u,20,28,u,u]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[5,13],zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,ymm4[7,15],zero,zero,zero,zero,ymm4[16,24],zero,zero,zero,zero,ymm4[17,25],zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm1[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[5,13],zero,zero,zero,zero,ymm6[6,14],zero,zero,zero,zero,ymm6[7,15],zero,zero,zero,zero,ymm6[16,24],zero,zero,zero,zero,ymm6[17,25],zero,zero,zero,zero,ymm6[18,26]
-; AVX2-SLOW-NEXT:    vpor %ymm4, %ymm6, %ymm4
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u,18,26,u,u]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm4, %ymm6, %ymm4
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[3,11],zero,zero,zero,zero,ymm0[4,12],zero,zero,zero,zero,ymm0[5,13],zero,zero,zero,zero,ymm0[22,30],zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[6,14],zero,zero,zero,zero,ymm0[7,15],zero,zero,zero,zero,ymm0[16,24],zero,zero,zero,zero,ymm0[17,25],zero,zero,zero,zero,ymm0[18,26],zero,zero
+; AVX2-SLOW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[5,13],zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,ymm4[7,15],zero,zero,zero,zero,ymm4[16,24],zero,zero,zero,zero,ymm4[17,25],zero,zero,zero,zero,ymm4[18,26]
+; AVX2-SLOW-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[3,11],zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,ymm1[21,29],zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,ymm1[23,31],zero,zero
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[2,10],zero,zero,zero,zero,ymm2[3,11],zero,zero,zero,zero,ymm2[4,12],zero,zero,zero,zero,ymm2[21,29],zero,zero,zero,zero,ymm2[22,30],zero,zero,zero,zero,ymm2[23,31]
-; AVX2-SLOW-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31,u,u]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 64(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 32(%rax)
+; AVX2-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 64(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 32(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm3, (%rax)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
@@ -698,36 +688,26 @@ define void @store_i8_stride6_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
 ; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa (%r8), %xmm2
-; AVX2-FAST-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8],zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,ymm3[2,10],zero,zero,zero,zero,ymm3[19,27],zero,zero,zero,zero,ymm3[20,28],zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm2[0,2,0,2]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,ymm3[0,8],zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,ymm3[18,26],zero,zero,zero,zero,ymm3[19,27],zero,zero,zero,zero,ymm3[20,28],zero,zero
 ; AVX2-FAST-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,8],zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,ymm4[2,10],zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,ymm4[20,28],zero,zero,zero,zero,ymm4[21,29]
 ; AVX2-FAST-NEXT:    vpor %ymm3, %ymm4, %ymm3
-; AVX2-FAST-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,8,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,19,27,u,u,u,u,20,28,u,u]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[5,13],zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,ymm4[7,15],zero,zero,zero,zero,ymm4[16,24],zero,zero,zero,zero,ymm4[17,25],zero,zero,zero,zero
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm1[0,2,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[5,13],zero,zero,zero,zero,ymm6[6,14],zero,zero,zero,zero,ymm6[7,15],zero,zero,zero,zero,ymm6[16,24],zero,zero,zero,zero,ymm6[17,25],zero,zero,zero,zero,ymm6[18,26]
-; AVX2-FAST-NEXT:    vpor %ymm4, %ymm6, %ymm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[0,2,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u,18,26,u,u]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm4, %ymm6, %ymm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[3,11],zero,zero,zero,zero,ymm0[4,12],zero,zero,zero,zero,ymm0[5,13],zero,zero,zero,zero,ymm0[22,30],zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[6,14],zero,zero,zero,zero,ymm0[7,15],zero,zero,zero,zero,ymm0[16,24],zero,zero,zero,zero,ymm0[17,25],zero,zero,zero,zero,ymm0[18,26],zero,zero
+; AVX2-FAST-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[0,2,1,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[5,13],zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,ymm4[7,15],zero,zero,zero,zero,ymm4[16,24],zero,zero,zero,zero,ymm4[17,25],zero,zero,zero,zero,ymm4[18,26]
+; AVX2-FAST-NEXT:    vpor %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[3,11],zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,ymm1[21,29],zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,ymm1[23,31],zero,zero
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[2,10],zero,zero,zero,zero,ymm2[3,11],zero,zero,zero,zero,ymm2[4,12],zero,zero,zero,zero,ymm2[21,29],zero,zero,zero,zero,ymm2[22,30],zero,zero,zero,zero,ymm2[23,31]
-; AVX2-FAST-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31,u,u]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT:    vmovdqa %ymm0, 64(%rax)
-; AVX2-FAST-NEXT:    vmovdqa %ymm4, 32(%rax)
+; AVX2-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT:    vmovdqa %ymm1, 64(%rax)
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, 32(%rax)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm3, (%rax)
 ; AVX2-FAST-NEXT:    vzeroupper
 ; AVX2-FAST-NEXT:    retq

diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
index ab2215c78ebde..167abef397469 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -3371,24 +3371,15 @@ define <32 x i8> @shuffle_v32i8_08_08_08_08_08_08_08_08_uu_uu_uu_uu_uu_uu_uu_uu_
 define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39(<32 x i8> %a, <32 x i8> %b) {
 ; AVX1-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[u,u,4,u,1,6],zero,zero,xmm2[0],zero,xmm2[11,u],zero,zero,zero,zero
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = xmm1[u,u],zero,xmm1[u],zero,zero,xmm1[5,0],zero,xmm1[10],zero,xmm1[u,4,2,4,7]
-; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[8,6,u,6,u,u,u,u,u,u,u,15,u,u,u,u]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,0,255,0,255,255,255,255,255,255,255,0,255,255,255,255]
-; AVX1-NEXT:    vpblendvb %xmm6, %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[u,u],zero,zero,xmm2[12],zero,xmm2[u,u,u],zero,zero,xmm2[u,0,3]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[10,13,u,u,3,3],zero,xmm1[8,u,u,u,12,1,u],zero,zero
-; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u],zero,zero,xmm4[u,u,u,u,1,6,13,u,u],zero,xmm4[u,u]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,12,13,u,u,u,u],zero,zero,zero,xmm0[u,u,12,u,u]
-; AVX1-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,255,0,0,255,255,255,255,0,0,0,255,255,0,255,255]
-; AVX1-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,6],zero,xmm2[6],zero,zero,zero,zero,zero,zero,zero,xmm2[15],zero,zero,zero,zero
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,xmm1[5,0],zero,xmm1[10],zero,zero,xmm1[4,2,4,7]
+; AVX1-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[12],zero,zero
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[10,13],zero,zero,xmm1[3,3],zero,xmm1[8],zero,zero,zero,xmm1[12,1],zero,zero,zero
+; AVX1-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
@@ -3407,15 +3398,11 @@ define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_
 ;
 ; AVX2-FAST-ALL-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
 ; AVX2-FAST-ALL:       # %bb.0:
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm2 = ymm1[10,13],zero,zero,ymm1[3,3],zero,ymm1[8],zero,zero,zero,ymm1[12,1],zero,zero,zero,zero,zero,ymm1[20],zero,ymm1[17,22],zero,zero,ymm1[16],zero,ymm1[27],zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,ymm1[12],zero,zero,zero,zero,zero,zero,zero,ymm1[0,3],zero,zero,zero,zero,zero,zero,ymm1[21,16],zero,ymm1[26],zero,zero,ymm1[20,18,20,23]
-; AVX2-FAST-ALL-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = <3,4,5,7,5,4,1,u>
 ; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm2, %ymm0
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,0,1,u,u,u,u,5,10,13,u,u,0,u,u,16,23,u,23,u,u,u,u,u,u,u,27,u,u,u,u]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,0,0,255,255,255,255,0,0,0,255,255,0,255,255,0,0,255,0,255,255,255,255,255,255,255,0,255,255,255,255]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[0,1],zero,zero,zero,zero,ymm0[5,10,13],zero,zero,ymm0[0],zero,zero,ymm0[16,23],zero,ymm0[23],zero,zero,zero,zero,zero,zero,zero,ymm0[27],zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[10,13],zero,zero,ymm1[3,3],zero,ymm1[8],zero,zero,zero,ymm1[12,1],zero,zero,zero,zero,zero,ymm1[20],zero,ymm1[17,22],zero,zero,ymm1[16],zero,ymm1[27],zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX2-FAST-ALL-NEXT:    retq
 ;
 ; AVX2-FAST-PERLANE-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
@@ -3450,14 +3437,11 @@ define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_
 ; AVX512VLBW-FAST-ALL-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
 ; AVX512VLBW-FAST-ALL:       # %bb.0:
 ; AVX512VLBW-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = <3,4,5,7,5,4,1,u>
-; AVX512VLBW-FAST-ALL-NEXT:    vpermd %ymm0, %ymm2, %ymm2
-; AVX512VLBW-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[10,13],zero,zero,ymm1[3,3],zero,ymm1[8],zero,zero,zero,ymm1[12,1],zero,zero,zero,zero,zero,ymm1[20],zero,ymm1[17,22],zero,zero,ymm1[16],zero,ymm1[27],zero,zero,zero,zero,zero
+; AVX512VLBW-FAST-ALL-NEXT:    vpermd %ymm0, %ymm2, %ymm0
+; AVX512VLBW-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[0,1],zero,zero,zero,zero,ymm0[5,10,13],zero,zero,ymm0[0],zero,zero,ymm0[16,23],zero,ymm0[23],zero,zero,zero,zero,zero,zero,zero,ymm0[27],zero,zero,zero,zero
 ; AVX512VLBW-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
 ; AVX512VLBW-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,ymm1[12],zero,zero,zero,zero,zero,zero,zero,ymm1[0,3],zero,zero,zero,zero,zero,zero,ymm1[21,16],zero,ymm1[26],zero,zero,ymm1[20,18,20,23]
 ; AVX512VLBW-FAST-ALL-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512VLBW-FAST-ALL-NEXT:    movl $134948620, %eax # imm = 0x80B270C
-; AVX512VLBW-FAST-ALL-NEXT:    kmovd %eax, %k1
-; AVX512VLBW-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm0 {%k1} = ymm2[u,u,0,1,u,u,u,u,5,10,13,u,u,0,u,u,16,23,u,23,u,u,u,u,u,u,u,27,u,u,u,u]
 ; AVX512VLBW-FAST-ALL-NEXT:    retq
 ;
 ; AVX512VLBW-FAST-PERLANE-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:


        

