[llvm] 67076eb - [X86][AVX] lowerShuffleAsLanePermuteAndShuffle - don't split repeated mask patterns

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 14 09:11:09 PST 2022


Author: Simon Pilgrim
Date: 2022-01-14T17:10:37Z
New Revision: 67076ebb606e9fc0e6403796ab9cb2952001684d

URL: https://github.com/llvm/llvm-project/commit/67076ebb606e9fc0e6403796ab9cb2952001684d
DIFF: https://github.com/llvm/llvm-project/commit/67076ebb606e9fc0e6403796ab9cb2952001684d.diff

LOG: [X86][AVX] lowerShuffleAsLanePermuteAndShuffle - don't split repeated mask patterns

Generalize 57a551a8dfa7 - if the in-lane mask is a repeated mask, we're better off performing the lane permute instead of splitting.
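For context, the heuristic only falls back to splitting when the in-lane mask does not repeat across the 128-bit lanes; if it does repeat, a single cross-lane permute plus one in-lane shuffle covers the whole vector. Below is a minimal standalone sketch of that repeated-mask test for a single-input, two-lane mask (e.g. v8i32/v8f32 with 4 elements per lane). The helper name isRepeatedInLaneMask and the use of plain std::vector are illustrative assumptions; the actual implementation uses LLVM's is128BitLaneRepeatedShuffleMask and SmallVector machinery.

// Sketch only: approximates the idea behind is128BitLaneRepeatedShuffleMask
// for a single-input shuffle. Indices are reduced modulo the lane size;
// -1 means "undef / don't care".
#include <cassert>
#include <cstddef>
#include <vector>

// Returns true if every 128-bit lane uses the same within-lane element
// pattern, i.e. the shuffle can be lowered as a lane permute followed by a
// single in-lane shuffle instead of being split into two 128-bit halves.
static bool isRepeatedInLaneMask(const std::vector<int> &Mask, int LaneSize) {
  assert(LaneSize > 0 && Mask.size() % LaneSize == 0 &&
         "mask must cover whole lanes");
  std::vector<int> Repeated(LaneSize, -1); // per-slot pattern, -1 = unset
  for (std::size_t i = 0; i < Mask.size(); ++i) {
    if (Mask[i] < 0)
      continue;                                  // undef matches anything
    int InLane = Mask[i] % LaneSize;             // index within source lane
    int Slot = static_cast<int>(i) % LaneSize;   // slot within dest lane
    if (Repeated[Slot] >= 0 && Repeated[Slot] != InLane)
      return false;                              // lanes want different patterns
    Repeated[Slot] = InLane;
  }
  return true;
}

int main() {
  // Pattern <0,4,1,5> repeats in both lanes: keep the lane permute + shuffle.
  assert(isRepeatedInLaneMask({0, 4, 1, 5, 0, 4, 1, 5}, 4));
  // Lanes disagree on the pattern: splitting into two 128-bit ops is cheaper.
  assert(!isRepeatedInLaneMask({0, 1, 2, 3, 7, 6, 5, 4}, 4));
  return 0;
}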

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/oddshuffles.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a5061f5a97768..0b9a72aeb91bb 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -16544,30 +16544,22 @@ static SDValue lowerShuffleAsLanePermuteAndShuffle(
             lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG))
       return V;
 
-  // Always allow ElementRotate patterns - these are sometimes hidden but its
-  // still better to avoid splitting.
-  SDValue RotV1 = V1, RotV2 = V2;
-  bool IsElementRotate = 0 <= matchShuffleAsElementRotate(RotV1, RotV2, Mask);
-
   // If there are only inputs from one 128-bit lane, splitting will in fact be
   // less expensive. The flags track whether the given lane contains an element
   // that crosses to another lane.
-  if (!IsElementRotate) {
-    if (!Subtarget.hasAVX2()) {
-      bool LaneCrossing[2] = {false, false};
-      for (int i = 0; i < Size; ++i)
-        if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
-          LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
-      if (!LaneCrossing[0] || !LaneCrossing[1])
-        return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
-    } else {
-      bool LaneUsed[2] = {false, false};
-      for (int i = 0; i < Size; ++i)
-        if (Mask[i] >= 0)
-          LaneUsed[(Mask[i] % Size) / LaneSize] = true;
-      if (!LaneUsed[0] || !LaneUsed[1])
-        return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
-    }
+  bool AllLanes;
+  if (!Subtarget.hasAVX2()) {
+    bool LaneCrossing[2] = {false, false};
+    for (int i = 0; i < Size; ++i)
+      if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
+        LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
+    AllLanes = LaneCrossing[0] && LaneCrossing[1];
+  } else {
+    bool LaneUsed[2] = {false, false};
+    for (int i = 0; i < Size; ++i)
+      if (Mask[i] >= 0)
+        LaneUsed[(Mask[i] % Size) / LaneSize] = true;
+    AllLanes = LaneUsed[0] && LaneUsed[1];
   }
 
   // TODO - we could support shuffling V2 in the Flipped input.
@@ -16585,6 +16577,11 @@ static SDValue lowerShuffleAsLanePermuteAndShuffle(
   assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
          "In-lane shuffle mask expected");
 
+  // If we don't need elements from both 128-bit lanes and the in-lane mask
+  // is not a repeated mask, then we're better off splitting.
+  if (!AllLanes && !is128BitLaneRepeatedShuffleMask(VT, InLaneMask))
+    return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
+
   // Flip the lanes, and shuffle the results which should now be in-lane.
   MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
   SDValue Flipped = DAG.getBitcast(PVT, V1);

diff  --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index fe9d52d4c2e15..cb2e97e18e8a2 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -1470,39 +1470,30 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
 ;
 ; AVX1-LABEL: interleave_24i32_out:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovups (%rdi), %ymm0
+; AVX1-NEXT:    vmovups 64(%rdi), %ymm0
 ; AVX1-NEXT:    vmovups 32(%rdi), %ymm1
-; AVX1-NEXT:    vmovups 80(%rdi), %xmm2
-; AVX1-NEXT:    vmovups 64(%rdi), %xmm3
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = zero,zero,xmm3[2],xmm2[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,3,2,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm2[2],xmm3[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,0,3,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[1,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
-; AVX1-NEXT:    vmovups %ymm4, (%rsi)
+; AVX1-NEXT:    vmovups (%rdi), %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX1-NEXT:    vmovups 16(%rdi), %xmm4
+; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[2,1],ymm1[1,3],ymm4[6,5],ymm1[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[0,3],ymm4[0,2],ymm3[4,7],ymm4[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[1,0],ymm4[2,0],ymm0[5,4],ymm4[6,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[2,0],ymm4[3,0],ymm0[6,4],ymm4[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm4[0,0],ymm5[2,0],ymm4[4,4],ymm5[6,4]
+; AVX1-NEXT:    vmovups 16(%rdi), %xmm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[1,2],ymm6[0,3],ymm7[5,6],ymm6[4,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm6[1,0],ymm2[2,0],ymm6[5,4],ymm2[6,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,3],ymm2[6,4],ymm1[4,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm4[0,1],ymm0[0,3],ymm4[4,5],ymm0[4,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
+; AVX1-NEXT:    vmovups %ymm3, (%rsi)
 ; AVX1-NEXT:    vmovups %ymm5, (%rdx)
 ; AVX1-NEXT:    vmovups %ymm0, (%rcx)
 ; AVX1-NEXT:    vzeroupper
@@ -1600,39 +1591,30 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
 ;
 ; XOP-LABEL: interleave_24i32_out:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vmovups (%rdi), %ymm0
+; XOP-NEXT:    vmovups 64(%rdi), %ymm0
 ; XOP-NEXT:    vmovups 32(%rdi), %ymm1
-; XOP-NEXT:    vmovups 80(%rdi), %xmm2
-; XOP-NEXT:    vmovups 64(%rdi), %xmm3
-; XOP-NEXT:    vinsertps {{.*#+}} xmm4 = zero,zero,xmm3[2],xmm2[1]
-; XOP-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; XOP-NEXT:    vblendps {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; XOP-NEXT:    vextractf128 $1, %ymm5, %xmm6
-; XOP-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,3,2,1]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,3,2,3]
-; XOP-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; XOP-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
-; XOP-NEXT:    vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm2[2],xmm3[3]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,0,3,2]
-; XOP-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; XOP-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; XOP-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; XOP-NEXT:    vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[1,1,3,3]
-; XOP-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; XOP-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
-; XOP-NEXT:    vshufps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0,3]
-; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; XOP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
-; XOP-NEXT:    vmovups %ymm4, (%rsi)
+; XOP-NEXT:    vmovups (%rdi), %ymm2
+; XOP-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; XOP-NEXT:    vmovups 16(%rdi), %xmm4
+; XOP-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[2,1],ymm1[1,3],ymm4[6,5],ymm1[5,7]
+; XOP-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[0,3],ymm4[0,2],ymm3[4,7],ymm4[4,6]
+; XOP-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,0,1]
+; XOP-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[1,0],ymm4[2,0],ymm0[5,4],ymm4[6,4]
+; XOP-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
+; XOP-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
+; XOP-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[2,0],ymm4[3,0],ymm0[6,4],ymm4[7,4]
+; XOP-NEXT:    vshufps {{.*#+}} ymm5 = ymm4[0,0],ymm5[2,0],ymm4[4,4],ymm5[6,4]
+; XOP-NEXT:    vmovups 16(%rdi), %xmm6
+; XOP-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; XOP-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[1,2],ymm6[0,3],ymm7[5,6],ymm6[4,7]
+; XOP-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[0,2,3,1,4,6,7,5]
+; XOP-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7]
+; XOP-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; XOP-NEXT:    vshufps {{.*#+}} ymm2 = ymm6[1,0],ymm2[2,0],ymm6[5,4],ymm2[6,4]
+; XOP-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,3],ymm2[6,4],ymm1[4,7]
+; XOP-NEXT:    vshufps {{.*#+}} ymm0 = ymm4[0,1],ymm0[0,3],ymm4[4,5],ymm0[4,7]
+; XOP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
+; XOP-NEXT:    vmovups %ymm3, (%rsi)
 ; XOP-NEXT:    vmovups %ymm5, (%rdx)
 ; XOP-NEXT:    vmovups %ymm0, (%rcx)
 ; XOP-NEXT:    vzeroupper

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
index e48a1e3e88541..89ec6447f9567 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
@@ -208,39 +208,30 @@ define void @load_i32_stride3_vf8(<24 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
 ;
 ; AVX1-LABEL: load_i32_stride3_vf8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovaps (%rdi), %ymm0
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm0
 ; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX1-NEXT:    vmovaps 80(%rdi), %xmm2
-; AVX1-NEXT:    vmovaps 64(%rdi), %xmm3
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = zero,zero,xmm3[2],xmm2[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,3,2,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm2[2],xmm3[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,0,3,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[1,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
-; AVX1-NEXT:    vmovaps %ymm4, (%rsi)
+; AVX1-NEXT:    vmovaps (%rdi), %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm4
+; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[2,1],ymm1[1,3],ymm4[6,5],ymm1[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[0,3],ymm4[0,2],ymm3[4,7],ymm4[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[1,0],ymm4[2,0],ymm0[5,4],ymm4[6,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[2,0],ymm4[3,0],ymm0[6,4],ymm4[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm4[0,0],ymm5[2,0],ymm4[4,4],ymm5[6,4]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[1,2],ymm6[0,3],ymm7[5,6],ymm6[4,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm6[1,0],ymm2[2,0],ymm6[5,4],ymm2[6,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,3],ymm2[6,4],ymm1[4,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm4[0,1],ymm0[0,3],ymm4[4,5],ymm0[4,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
+; AVX1-NEXT:    vmovaps %ymm3, (%rsi)
 ; AVX1-NEXT:    vmovaps %ymm5, (%rdx)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rcx)
 ; AVX1-NEXT:    vzeroupper
@@ -460,75 +451,57 @@ define void @load_i32_stride3_vf16(<48 x i32>* %in.vec, <16 x i32>* %out.vec0, <
 ;
 ; AVX1-LABEL: load_i32_stride3_vf16:
 ; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm1
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm3
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm5
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm6
-; AVX1-NEXT:    vmovaps 128(%rdi), %ymm11
-; AVX1-NEXT:    vmovaps 96(%rdi), %ymm12
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm12[0],ymm11[1],ymm12[2,3],ymm11[4],ymm12[5,6],ymm11[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[0,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm3
-; AVX1-NEXT:    vmovaps 176(%rdi), %xmm13
-; AVX1-NEXT:    vmovaps 160(%rdi), %xmm4
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm7 = zero,zero,xmm4[2],xmm13[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm3[0,1,2,3,4,5],ymm7[6,7]
-; AVX1-NEXT:    vmovaps 80(%rdi), %xmm3
-; AVX1-NEXT:    vmovaps 64(%rdi), %xmm7
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm7[2],xmm3[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6],ymm6[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm8[0,1],xmm1[2],xmm8[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm7[0,1],xmm3[2],xmm7[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,3,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,0,3,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3,4],ymm0[5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm11[2],ymm12[3,4],ymm11[5],ymm12[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,0,3,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm4[0,1],xmm13[2],xmm4[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,0,3,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm7[0,1],xmm3[0,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm4[0,1],xmm13[0,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
-; AVX1-NEXT:    vmovaps %ymm10, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm9, 32(%rsi)
-; AVX1-NEXT:    vmovaps %ymm0, 32(%rdx)
-; AVX1-NEXT:    vmovaps %ymm8, (%rdx)
-; AVX1-NEXT:    vmovaps %ymm2, 32(%rcx)
+; AVX1-NEXT:    vmovaps 160(%rdi), %ymm0
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm2
+; AVX1-NEXT:    vmovaps 96(%rdi), %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5,6],ymm2[7]
+; AVX1-NEXT:    vmovaps 112(%rdi), %xmm7
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[2,1],ymm2[1,3],ymm7[6,5],ymm2[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm6[0,3],ymm7[0,2],ymm6[4,7],ymm7[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm0[1,0],ymm6[2,0],ymm0[5,4],ymm6[6,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm9
+; AVX1-NEXT:    vshufps {{.*#+}} ymm9 = ymm9[2,1],ymm3[1,3],ymm9[6,5],ymm3[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm8[0,3],ymm9[0,2],ymm8[4,7],ymm9[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm10 = ymm1[1,0],ymm9[2,0],ymm1[5,4],ymm9[6,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm10 = ymm1[2,0],ymm9[3,0],ymm1[6,4],ymm9[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm10 = ymm9[0,0],ymm10[2,0],ymm9[4,4],ymm10[6,4]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm11
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[1,2],ymm11[0,3],ymm12[5,6],ymm11[4,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3,4],ymm10[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm0[2,0],ymm6[3,0],ymm0[6,4],ymm6[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm6[0,0],ymm12[2,0],ymm6[4,4],ymm12[6,4]
+; AVX1-NEXT:    vmovaps 112(%rdi), %xmm13
+; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm14 = ymm14[1,2],ymm13[0,3],ymm14[5,6],ymm13[4,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm14 = ymm14[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm14[0,1,2,3,4],ymm12[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm11[1,0],ymm5[2,0],ymm11[5,4],ymm5[6,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm5[2,0],ymm3[0,3],ymm5[6,4],ymm3[4,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm9[0,1],ymm1[0,3],ymm9[4,5],ymm1[4,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm13[1,0],ymm3[2,0],ymm13[5,4],ymm3[6,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm3[2,0],ymm2[0,3],ymm3[6,4],ymm2[4,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm6[0,1],ymm0[0,3],ymm6[4,5],ymm0[4,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
+; AVX1-NEXT:    vmovaps %ymm8, (%rsi)
+; AVX1-NEXT:    vmovaps %ymm7, 32(%rsi)
+; AVX1-NEXT:    vmovaps %ymm12, 32(%rdx)
+; AVX1-NEXT:    vmovaps %ymm10, (%rdx)
+; AVX1-NEXT:    vmovaps %ymm0, 32(%rcx)
 ; AVX1-NEXT:    vmovaps %ymm1, (%rcx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -938,171 +911,129 @@ define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <
 ;
 ; AVX1-LABEL: load_i32_stride3_vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $392, %rsp # imm = 0x188
-; AVX1-NEXT:    vmovaps 320(%rdi), %ymm3
-; AVX1-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    subq $456, %rsp # imm = 0x1C8
+; AVX1-NEXT:    vmovaps 352(%rdi), %ymm3
+; AVX1-NEXT:    vmovaps 320(%rdi), %ymm9
 ; AVX1-NEXT:    vmovaps 288(%rdi), %ymm4
 ; AVX1-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps (%rdi), %ymm2
-; AVX1-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm6
-; AVX1-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 128(%rdi), %ymm1
-; AVX1-NEXT:    vmovaps 96(%rdi), %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0],ymm1[1],ymm5[2,3],ymm1[4],ymm5[5,6],ymm1[7]
-; AVX1-NEXT:    vmovaps %ymm5, %ymm9
-; AVX1-NEXT:    vmovaps %ymm1, %ymm10
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps 176(%rdi), %xmm5
-; AVX1-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 160(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm1[2],xmm5[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm6
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm2
+; AVX1-NEXT:    vmovaps (%rdi), %ymm8
+; AVX1-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 160(%rdi), %ymm10
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm7
+; AVX1-NEXT:    vmovaps 96(%rdi), %ymm0
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2,3],ymm7[4],ymm0[5,6],ymm7[7]
+; AVX1-NEXT:    vmovaps 112(%rdi), %xmm1
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm7[1,3],ymm1[6,5],ymm7[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm1[0,2],ymm0[4,7],ymm1[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm10[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm10[1,0],ymm5[2,0],ymm10[5,4],ymm5[6,4]
+; AVX1-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 80(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps 64(%rdi), %xmm0
-; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[2],xmm1[1]
-; AVX1-NEXT:    vmovaps %xmm1, %xmm8
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0],ymm6[1],ymm2[2,3],ymm6[4],ymm2[5,6],ymm6[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,3,2,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0],ymm2[1],ymm8[2,3],ymm2[4],ymm8[5,6],ymm2[7]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm1
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm2[1,3],ymm1[6,5],ymm2[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm1[0,2],ymm0[4,7],ymm1[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm6[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm6[1,0],ymm13[2,0],ymm6[5,4],ymm13[6,4]
+; AVX1-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps 368(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 352(%rdi), %xmm11
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm11[2],xmm1[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0],ymm9[1],ymm4[2,3],ymm9[4],ymm4[5,6],ymm9[7]
+; AVX1-NEXT:    vmovaps 304(%rdi), %xmm1
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1],ymm9[1,3],ymm1[6,5],ymm9[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm1[0,2],ymm0[4,7],ymm1[4,6]
+; AVX1-NEXT:    vmovaps %ymm3, %ymm4
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm3[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm3[1,0],ymm15[2,0],ymm3[5,4],ymm15[6,4]
+; AVX1-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 224(%rdi), %ymm1
+; AVX1-NEXT:    vmovaps 224(%rdi), %ymm14
+; AVX1-NEXT:    vmovaps 208(%rdi), %xmm0
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm14[1,3],ymm0[6,5],ymm14[5,7]
 ; AVX1-NEXT:    vmovaps 192(%rdi), %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX1-NEXT:    vmovaps %ymm2, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0],ymm14[1],ymm2[2,3],ymm14[4],ymm2[5,6],ymm14[7]
 ; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps %ymm1, %ymm2
-; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm12
-; AVX1-NEXT:    vmovaps 272(%rdi), %xmm15
-; AVX1-NEXT:    vmovaps 256(%rdi), %xmm14
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm13 = zero,zero,xmm14[2],xmm15[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm13
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1,2,3,4,5],ymm13[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm1[0,3],ymm0[0,2],ymm1[4,7],ymm0[4,6]
+; AVX1-NEXT:    vmovaps 256(%rdi), %ymm11
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm11[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm11[1,0],ymm9[2,0],ymm11[5,4],ymm9[6,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm12[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm12, %xmm13
-; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm13[0],xmm12[1,2],xmm13[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[1,0,3,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm13[1,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm14[0,1],xmm15[2],xmm14[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,0,3,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm7[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm11[2,0],ymm9[3,0],ymm11[6,4],ymm9[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm9[0,0],ymm8[2,0],ymm9[4,4],ymm8[6,4]
+; AVX1-NEXT:    vmovaps 208(%rdi), %xmm0
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps %ymm10, %ymm12
-; AVX1-NEXT:    vmovaps %ymm9, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[1,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm9[0,1],xmm10[2],xmm9[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,0,3,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm7[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0,1],ymm14[2],ymm2[3,4],ymm14[5],ymm2[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm8[1,2],ymm0[0,3],ymm8[5,6],ymm0[4,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm12[5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovups (%rsp), %ymm3 # 32-byte Reload
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[1,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm8, %xmm0
-; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm1[0,1],xmm8[2],xmm1[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,0,3,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4],ymm7[5,6,7]
-; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX1-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm10[2,0],ymm5[3,0],ymm10[6,4],ymm5[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm5[0,0],ymm8[2,0],ymm5[4,4],ymm8[6,4]
+; AVX1-NEXT:    vmovaps 112(%rdi), %xmm12
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm5[0,1],ymm7[2],ymm5[3,4],ymm7[5],ymm5[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm10 = ymm10[1,2],ymm12[0,3],ymm10[5,6],ymm12[4,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm10 = ymm10[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm8[5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm6[2,0],ymm13[3,0],ymm6[6,4],ymm13[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm13[0,0],ymm8[2,0],ymm13[4,4],ymm8[6,4]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm10
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm13[0,1],ymm2[2],ymm13[3,4],ymm2[5],ymm13[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[1,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm11[0,1],xmm8[2],xmm11[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,0,3,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[1,2],ymm10[0,3],ymm6[5,6],ymm10[4,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm8[5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm4[2,0],ymm15[3,0],ymm4[6,4],ymm15[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm15[0,0],ymm6[2,0],ymm15[4,4],ymm6[6,4]
+; AVX1-NEXT:    vmovaps 304(%rdi), %xmm8
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[1,2],ymm8[0,3],ymm4[5,6],ymm8[4,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm4 = ymm4[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm6[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm5[2],ymm7[3,4],ymm5[5],ymm7[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm12[1,0],ymm6[2,0],ymm12[5,4],ymm6[6,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[2,0],ymm7[0,3],ymm6[6,4],ymm7[4,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-NEXT:    vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm7 = ymm5[0,1],mem[0,3],ymm5[4,5],mem[4,7]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5,6,7]
-; AVX1-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm12[0,1],ymm5[2],ymm12[3,4],ymm5[5],ymm12[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[2,1,0,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[2,3,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm9[0,1],xmm10[0,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm6[0,1,2,3,4],ymm7[5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm1[0,1],xmm0[0,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[2,1,0,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[2,3,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm2[0,1],ymm13[2],ymm2[3,4],ymm13[5],ymm2[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[2,1,0,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[2,3,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm11[0,1],xmm8[0,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm10[1,0],ymm7[2,0],ymm10[5,4],ymm7[6,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm7[2,0],ymm2[0,3],ymm7[6,4],ymm2[4,7]
+; AVX1-NEXT:    vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm7 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm7 = ymm13[0,1],mem[0,3],ymm13[4,5],mem[4,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm7[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm8[1,0],ymm7[2,0],ymm8[5,4],ymm7[6,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm7[2,0],ymm1[0,3],ymm7[6,4],ymm1[4,7]
+; AVX1-NEXT:    vshufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm7 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm7 = ymm15[0,1],mem[0,3],ymm15[4,5],mem[4,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm7[5,6,7]
+; AVX1-NEXT:    vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm3 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm3 = ymm14[0,1],mem[2],ymm14[3,4],mem[5],ymm14[6,7]
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vblendps $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm2 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm14[0,1],xmm15[0,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm0[1,0],ymm3[2,0],ymm0[5,4],ymm3[6,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[2,0],ymm14[0,3],ymm3[6,4],ymm14[4,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm9[0,1],ymm11[0,3],ymm9[4,5],ymm11[4,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm1, 64(%rsi)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
@@ -1111,19 +1042,18 @@ define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <
 ; AVX1-NEXT:    vmovaps %ymm1, (%rsi)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm1, 32(%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, 96(%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm4, 96(%rdx)
+; AVX1-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm1, (%rdx)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm1, 32(%rdx)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm1, 64(%rdx)
 ; AVX1-NEXT:    vmovaps %ymm0, 64(%rcx)
-; AVX1-NEXT:    vmovaps %ymm3, 96(%rcx)
+; AVX1-NEXT:    vmovaps %ymm2, 96(%rcx)
 ; AVX1-NEXT:    vmovaps %ymm5, (%rcx)
-; AVX1-NEXT:    vmovaps %ymm12, 32(%rcx)
-; AVX1-NEXT:    addq $392, %rsp # imm = 0x188
+; AVX1-NEXT:    vmovaps %ymm6, 32(%rcx)
+; AVX1-NEXT:    addq $456, %rsp # imm = 0x1C8
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
index dc73c043b2a3d..8fb6a5296f695 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
@@ -240,57 +240,47 @@ define void @load_i32_stride4_vf8(<32 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
 ;
 ; AVX1-LABEL: load_i32_stride4_vf8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovaps 80(%rdi), %xmm10
-; AVX1-NEXT:    vmovaps 64(%rdi), %xmm1
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm3
-; AVX1-NEXT:    vmovaps 96(%rdi), %xmm2
-; AVX1-NEXT:    vmovaps 112(%rdi), %xmm4
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm4[0,0],xmm2[0,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,1,2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3,4,5],ymm5[6,7]
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm3
-; AVX1-NEXT:    vmovaps 48(%rdi), %xmm5
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm5[0],xmm3[0]
-; AVX1-NEXT:    vmovaps (%rdi), %xmm6
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm0
+; AVX1-NEXT:    vmovaps 96(%rdi), %ymm1
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm1[0],ymm10[0],ymm1[2],ymm10[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm5 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[4],ymm0[4],ymm4[5],ymm0[5]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm5[0,1],ymm3[2,0],ymm5[4,5],ymm3[6,4]
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm5
+; AVX1-NEXT:    vmovaps 48(%rdi), %xmm6
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm6[0],xmm5[0]
+; AVX1-NEXT:    vmovaps (%rdi), %xmm3
 ; AVX1-NEXT:    vmovaps 16(%rdi), %xmm7
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm9[2,0]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm8[4,5,6,7]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm9
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[1],xmm10[1],zero,zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm9[6,7]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm11 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm6[1],xmm7[1],zero,zero
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm9[4,5,6,7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm2[2],xmm4[2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm11
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm11[6,7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm12 = zero,zero,xmm3[2],xmm5[2]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm12[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm10[3,0],xmm1[3,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm7[3,0],xmm6[3,0]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm8[4,5,6,7]
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm8 = ymm10[0],ymm1[0],ymm10[1],ymm1[1],ymm10[4],ymm1[4],ymm10[5],ymm1[5]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm9 = ymm0[1,0],ymm4[1,0],ymm0[5,4],ymm4[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm9[2,0],ymm8[2,3],ymm9[6,4],ymm8[6,7]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm9 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[1],xmm7[1],zero,zero
+; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm2[0,1,2,3],ymm8[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm1[1],ymm10[1],ymm1[3],ymm10[3]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm9 = ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[6],ymm0[6],ymm4[7],ymm0[7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,0],ymm9[4,5],ymm8[6,4]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm9 = zero,zero,xmm5[2],xmm6[2]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm1 = ymm10[2],ymm1[2],ymm10[3],ymm1[3],ymm10[6],ymm1[6],ymm10[7],ymm1[7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm4[3,0],ymm0[7,4],ymm4[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,3],ymm0[6,4],ymm1[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm7[3,0],xmm3[3,0]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-NEXT:    vmovaps %ymm8, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm9, (%rdx)
-; AVX1-NEXT:    vmovaps %ymm0, (%rcx)
-; AVX1-NEXT:    vmovaps %ymm1, (%r8)
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovaps %ymm11, (%rsi)
+; AVX1-NEXT:    vmovaps %ymm12, (%rdx)
+; AVX1-NEXT:    vmovaps %ymm2, (%rcx)
+; AVX1-NEXT:    vmovaps %ymm0, (%r8)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -488,156 +478,135 @@ define void @load_i32_stride4_vf16(<64 x i32>* %in.vec, <16 x i32>* %out.vec0, <
 ;
 ; AVX1-LABEL: load_i32_stride4_vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $216, %rsp
+; AVX1-NEXT:    subq $328, %rsp # imm = 0x148
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm5
+; AVX1-NEXT:    vmovaps 96(%rdi), %ymm4
+; AVX1-NEXT:    vmovaps 192(%rdi), %ymm3
+; AVX1-NEXT:    vmovaps 224(%rdi), %ymm14
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm14[2,3,0,1]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm14[0],ymm11[0],ymm14[2],ymm11[2]
+; AVX1-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm3[2,3,0,1]
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm1 = ymm7[0],ymm3[0],ymm7[1],ymm3[1],ymm7[4],ymm3[4],ymm7[5],ymm3[5]
+; AVX1-NEXT:    vmovaps %ymm3, %ymm12
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
 ; AVX1-NEXT:    vmovaps 160(%rdi), %xmm1
 ; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovaps 176(%rdi), %xmm0
 ; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-NEXT:    vmovaps 144(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 128(%rdi), %xmm4
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,0]
-; AVX1-NEXT:    vmovaps 208(%rdi), %xmm2
-; AVX1-NEXT:    vmovaps 192(%rdi), %xmm3
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-NEXT:    vmovaps %xmm2, %xmm8
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vmovaps 224(%rdi), %xmm6
-; AVX1-NEXT:    vmovaps 240(%rdi), %xmm5
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm5[0,0],xmm6[0,0]
-; AVX1-NEXT:    vmovaps %xmm5, %xmm10
-; AVX1-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps %xmm6, %xmm5
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 80(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 64(%rdi), %xmm0
-; AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps 96(%rdi), %xmm2
-; AVX1-NEXT:    vmovaps 112(%rdi), %xmm7
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm7[0,0],xmm2[0,0]
-; AVX1-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps %xmm2, %xmm6
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vmovaps 144(%rdi), %xmm0
+; AVX1-NEXT:    vmovaps 128(%rdi), %xmm2
 ; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm11
-; AVX1-NEXT:    vmovaps 48(%rdi), %xmm12
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm12[0],xmm11[0]
-; AVX1-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps (%rdi), %xmm15
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm13
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm14 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm14[0,1],xmm0[2,0]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX1-NEXT:    vmovaps %xmm0, %xmm13
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm8[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm10[0],xmm5[1],xmm10[1]
-; AVX1-NEXT:    vmovaps %xmm5, %xmm9
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[1],xmm8[1],zero,zero
-; AVX1-NEXT:    vmovaps %xmm8, %xmm10
-; AVX1-NEXT:    vmovaps %xmm3, %xmm8
-; AVX1-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vmovaps %xmm4, %xmm5
-; AVX1-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm4[2,3,0,1]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm4[0],ymm2[0],ymm4[2],ymm2[2]
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm5[2,3,0,1]
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm1 = ymm9[0],ymm5[0],ymm9[1],ymm5[1],ymm9[4],ymm5[4],ymm9[5],ymm5[5]
+; AVX1-NEXT:    vmovups %ymm9, (%rsp) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm0
+; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovaps 48(%rdi), %xmm6
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm6[0],xmm0[0]
+; AVX1-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovaps (%rdi), %xmm10
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm5
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm15 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm15[0,1],xmm0[2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm11[0],ymm14[0],ymm11[1],ymm14[1],ymm11[4],ymm14[4],ymm11[5],ymm14[5]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm15 = ymm3[1,0],ymm7[1,0],ymm3[5,4],ymm7[5,4]
+; AVX1-NEXT:    vmovaps %ymm7, %ymm11
+; AVX1-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm0[2,3],ymm15[6,4],ymm0[6,7]
+; AVX1-NEXT:    vmovaps %xmm13, %xmm1
+; AVX1-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm4[1],xmm3[1],zero,zero
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm2[0],xmm14[0],xmm2[1],xmm14[1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[1],xmm13[1],zero,zero
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm13 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm13[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-NEXT:    vmovaps (%rsp), %xmm7 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm7[1],xmm6[1],zero,zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
-; AVX1-NEXT:    vmovaps %xmm11, %xmm12
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = xmm15[1],xmm13[1],zero,zero
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm13 = ymm2[1,0],ymm9[1,0],ymm2[5,4],ymm9[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm13 = ymm13[2,0],ymm0[2,3],ymm13[6,4],ymm0[6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm6 = xmm10[1],xmm5[1],zero,zero
+; AVX1-NEXT:    vmovaps %xmm5, %xmm9
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm6[0,1],xmm0[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm9[2],xmm11[2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
-; AVX1-NEXT:    vmovaps %xmm10, %xmm8
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = zero,zero,xmm2[2],xmm14[2]
-; AVX1-NEXT:    vmovaps %xmm2, %xmm10
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm14[1],ymm15[1],ymm14[3],ymm15[3]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm6 = ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[6],ymm12[6],ymm11[7],ymm12[7]
+; AVX1-NEXT:    vmovaps %ymm12, %ymm13
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,0],ymm6[4,5],ymm0[6,4]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm6 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX1-NEXT:    vmovaps %xmm7, %xmm5
+; AVX1-NEXT:    vmovaps %xmm8, %xmm1
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm7 = zero,zero,xmm8[2],xmm7[2]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm8[1],ymm12[1],ymm8[3],ymm12[3]
+; AVX1-NEXT:    vmovups (%rsp), %ymm11 # 32-byte Reload
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm7 = ymm11[2],ymm2[2],ymm11[3],ymm2[3],ymm11[6],ymm2[6],ymm11[7],ymm2[7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm2[2],xmm3[2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps %xmm7, %xmm5
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm12[2],xmm7[2]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm15[2],xmm13[2],xmm15[3],xmm13[3]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm9[2],xmm11[2],xmm9[3],xmm11[3]
-; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm4 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm4 = xmm8[3,0],mem[3,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm10[2],xmm14[2],xmm10[3],xmm14[3]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm6 = xmm6[3,0],mem[3,0]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm7 = zero,zero,xmm4[2],xmm2[2]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm6 = ymm15[2],ymm14[2],ymm15[3],ymm14[3],ymm15[6],ymm14[6],ymm15[7],ymm14[7]
+; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm3 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm3 = ymm13[3,0],mem[3,0],ymm13[7,4],mem[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[2,0],ymm6[2,3],ymm3[6,4],ymm6[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm6 = xmm1[3,0],mem[3,0]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[2,0,2,3]
 ; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm2[3,0],xmm5[3,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm12[2],xmm7[2],xmm12[3],xmm7[3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm13[3,0],xmm15[3,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[2,0,2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm3, 32(%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm3, (%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm3, 32(%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm3, (%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm3, 32(%rcx)
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm4 = ymm12[2],ymm8[2],ymm12[3],ymm8[3],ymm12[6],ymm8[6],ymm12[7],ymm8[7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm1[3,0],ymm11[3,0],ymm1[7,4],ymm11[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm6[2,0],ymm4[2,3],ymm6[6,4],ymm4[6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm9[3,0],xmm10[3,0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,0,2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, 32(%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, (%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, 32(%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, (%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, 32(%rcx)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rcx)
-; AVX1-NEXT:    vmovaps %ymm1, 32(%r8)
-; AVX1-NEXT:    vmovaps %ymm2, (%r8)
-; AVX1-NEXT:    addq $216, %rsp
+; AVX1-NEXT:    vmovaps %ymm3, 32(%r8)
+; AVX1-NEXT:    vmovaps %ymm1, (%r8)
+; AVX1-NEXT:    addq $328, %rsp # imm = 0x148
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -1057,333 +1026,293 @@ define void @load_i32_stride4_vf32(<128 x i32>* %in.vec, <32 x i32>* %out.vec0,
 ;
 ; AVX1-LABEL: load_i32_stride4_vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $744, %rsp # imm = 0x2E8
-; AVX1-NEXT:    vmovaps 416(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 432(%rdi), %xmm0
-; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-NEXT:    vmovaps 400(%rdi), %xmm2
+; AVX1-NEXT:    subq $1000, %rsp # imm = 0x3E8
+; AVX1-NEXT:    vmovaps 320(%rdi), %ymm3
+; AVX1-NEXT:    vmovaps 352(%rdi), %ymm4
+; AVX1-NEXT:    vmovaps 448(%rdi), %ymm2
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 480(%rdi), %ymm1
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX1-NEXT:    vmovaps 416(%rdi), %xmm2
 ; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 384(%rdi), %xmm1
+; AVX1-NEXT:    vmovaps 432(%rdi), %xmm1
 ; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,0]
-; AVX1-NEXT:    vmovaps 464(%rdi), %xmm2
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT:    vmovaps 400(%rdi), %xmm5
+; AVX1-NEXT:    vmovaps %xmm5, (%rsp) # 16-byte Spill
+; AVX1-NEXT:    vmovaps 384(%rdi), %xmm2
 ; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 448(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vmovaps 480(%rdi), %xmm3
-; AVX1-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 496(%rdi), %xmm2
-; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,0],xmm3[0,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 288(%rdi), %xmm0
-; AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm4[2,3,0,1]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
+; AVX1-NEXT:    vmovaps %ymm1, %ymm6
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps %ymm3, %ymm5
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm3[2,3,0,1]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5]
+; AVX1-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX1-NEXT:    vmovaps 288(%rdi), %xmm2
+; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovaps 304(%rdi), %xmm1
 ; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT:    vmovaps 272(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT:    vmovaps 272(%rdi), %xmm3
+; AVX1-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovaps 256(%rdi), %xmm2
 ; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,0]
-; AVX1-NEXT:    vmovaps 336(%rdi), %xmm14
-; AVX1-NEXT:    vmovaps 320(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1]
-; AVX1-NEXT:    vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vmovaps 352(%rdi), %xmm11
-; AVX1-NEXT:    vmovaps 368(%rdi), %xmm12
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm12[0,0],xmm11[0,0]
-; AVX1-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 192(%rdi), %ymm0
+; AVX1-NEXT:    vmovaps 224(%rdi), %ymm13
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm13[2,3,0,1]
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm13[0],ymm2[0],ymm13[2],ymm2[2]
+; AVX1-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm3 = ymm12[0],ymm0[0],ymm12[1],ymm0[1],ymm12[4],ymm0[4],ymm12[5],ymm0[5]
+; AVX1-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps %ymm0, %ymm14
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,0],ymm3[4,5],ymm2[6,4]
 ; AVX1-NEXT:    vmovaps 160(%rdi), %xmm0
 ; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 176(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT:    vmovaps 144(%rdi), %xmm2
-; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 128(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm8 = xmm1[0,1],xmm0[2,0]
-; AVX1-NEXT:    vmovaps 208(%rdi), %xmm5
-; AVX1-NEXT:    vmovaps 192(%rdi), %xmm10
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
-; AVX1-NEXT:    vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm9
-; AVX1-NEXT:    vmovaps 224(%rdi), %xmm3
-; AVX1-NEXT:    vmovaps 240(%rdi), %xmm4
-; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm4[0,0],xmm3[0,0]
+; AVX1-NEXT:    vmovaps 176(%rdi), %xmm9
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm9[0],xmm0[0]
+; AVX1-NEXT:    vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovaps 144(%rdi), %xmm4
+; AVX1-NEXT:    vmovaps 128(%rdi), %xmm8
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
+; AVX1-NEXT:    vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3,4,5],ymm7[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm7[0,1],xmm3[2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 80(%rdi), %xmm0
-; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 64(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vmovaps 96(%rdi), %xmm13
-; AVX1-NEXT:    vmovaps 112(%rdi), %xmm15
-; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm15[0,0],xmm13[0,0]
-; AVX1-NEXT:    vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm6[6,7]
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm0
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 96(%rdi), %ymm1
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm11 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm11[0,1],ymm10[2,0],ymm11[4,5],ymm10[6,4]
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm2
+; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovaps 48(%rdi), %xmm0
 ; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-NEXT:    vmovaps (%rdi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm2
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT:    vmovaps (%rdi), %xmm2
 ; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm6[0,1],xmm0[2,0]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm3
+; AVX1-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm15 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm15[0,1],xmm0[2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm6 = mem[0],xmm14[1],zero,zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm6[0],ymm10[0],ymm6[1],ymm10[1],ymm6[4],ymm10[4],ymm6[5],ymm10[5]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm5[1,0],ymm6[1,0],ymm5[5,4],ymm6[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX1-NEXT:    vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm6 = mem[0],xmm11[1],zero,zero
-; AVX1-NEXT:    vmovaps (%rsp), %xmm14 # 16-byte Reload
-; AVX1-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm7 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm7 = xmm14[0],mem[0],xmm14[1],mem[1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm6 = xmm10[1],xmm5[1],zero,zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-NEXT:    vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm6 = mem[0],xmm1[1],zero,zero
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm7 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm7 = xmm1[0],mem[0],xmm1[1],mem[1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm11[1],xmm5[1],zero,zero
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm7 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm7 = xmm2[0],mem[0],xmm2[1],mem[1]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm13[0],ymm0[1],ymm13[1],ymm0[4],ymm13[4],ymm0[5],ymm13[5]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm14[1,0],ymm12[1,0],ymm14[5,4],ymm12[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm8[1],xmm4[1],zero,zero
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm6 = xmm12[1],xmm2[1],zero,zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm6 = xmm9[1],xmm5[1],zero,zero
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm15[0],ymm2[0],ymm15[1],ymm2[1],ymm15[4],ymm2[4],ymm15[5],ymm2[5]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm3[1,0],ymm12[1,0],ymm3[5,4],ymm12[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm8[1],xmm1[1],zero,zero
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm6 = xmm13[1],xmm4[1],zero,zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX1-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm6 = xmm15[0],mem[0],xmm15[1],mem[1]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm4[0],xmm9[0],xmm4[1],xmm9[1]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,0],ymm13[1,0],ymm1[5,4],ymm13[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX1-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm1 = xmm14[0],mem[0],xmm14[1],mem[1]
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
 ; AVX1-NEXT:    vinsertps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
 ; AVX1-NEXT:    # xmm7 = mem[0],xmm7[1],zero,zero
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm7[0,1],xmm1[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm0 = zero,zero,xmm0[2],mem[0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm6 = xmm6[2],xmm11[2],xmm6[3],xmm11[3]
-; AVX1-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm7 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm7 = zero,zero,xmm14[2],mem[0]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm0 = ymm10[1],mem[1],ymm10[3],mem[3]
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm1 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm1 = ymm6[2],mem[2],ymm6[3],mem[3],ymm6[6],mem[6],ymm6[7],mem[7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm11[2],xmm5[2],xmm11[3],xmm5[3]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm7 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm7 = zero,zero,xmm5[2],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm0 = zero,zero,xmm0[2],mem[0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm6 = xmm14[2],mem[2],xmm14[3],mem[3]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm1 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm1 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX1-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm7 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm7 = zero,zero,xmm11[2],mem[0]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm3[2],xmm10[2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm6 = xmm12[2],xmm2[2],xmm12[3],xmm2[3]
-; AVX1-NEXT:    vmovaps %xmm12, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm6 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm7 = zero,zero,xmm1[2],xmm8[2]
-; AVX1-NEXT:    vmovaps %xmm1, %xmm12
-; AVX1-NEXT:    vmovaps %xmm8, %xmm10
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm8[2],xmm5[2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm6 = xmm13[2],xmm4[2],xmm13[3],xmm4[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm6 = zero,zero,xmm15[2],xmm13[2]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm7 = xmm9[2],xmm15[2],xmm9[3],xmm15[3]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm1 = xmm1[3,0],mem[3,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
 ; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm1 # 16-byte Folded Reload
 ; AVX1-NEXT:    # xmm1 = xmm11[2],mem[2],xmm11[3],mem[3]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[3,0],xmm14[3,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[2,0,2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm7 = zero,zero,xmm5[2],xmm6[2]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[3,0],xmm3[3,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm3 = xmm3[3,0],mem[3,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm3 = xmm3[3,0],mem[3,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX1-NEXT:    vmovaps (%rsp), %xmm3 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm4 = xmm4[3,0],mem[3,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm8[2],xmm5[2],xmm8[3],xmm5[3]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm2[1],ymm15[1],ymm2[3],ymm15[3]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm1 = ymm12[2],ymm3[2],ymm12[3],ymm3[3],ymm12[6],ymm3[6],ymm12[7],ymm3[7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX1-NEXT:    vmovaps (%rsp), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm8[2],xmm2[2],xmm8[3],xmm2[3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm7 = zero,zero,xmm4[2],xmm9[2]
+; AVX1-NEXT:    vmovaps %xmm9, %xmm3
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm15[1],ymm9[1],ymm15[3],ymm9[3]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm1 = ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[6],ymm12[6],ymm13[7],ymm12[7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm14[2],xmm13[2]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm7 = xmm8[2],xmm14[2],xmm8[3],xmm14[3]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm7[0,1],xmm1[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm10[3,0],ymm1[7,4],ymm10[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm4 = xmm4[3,0],mem[3,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[3,0],xmm11[3,0]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm4 = ymm4[3,0],mem[3,0],ymm4[7,4],mem[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm4[2,0],ymm1[2,3],ymm4[6,4],ymm1[6,7]
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm4[2],xmm13[2],xmm4[3],xmm13[3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm15[3,0],xmm9[3,0]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm5 = xmm2[3,0],mem[3,0]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[2,0,2,3]
 ; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, (%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, 32(%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, 64(%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, 96(%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, 96(%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, (%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, 32(%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, 64(%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, 96(%rcx)
-; AVX1-NEXT:    vmovaps %ymm6, (%rcx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, 32(%rcx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm4, 64(%rcx)
-; AVX1-NEXT:    vmovaps %ymm2, 64(%r8)
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm4 = ymm3[2],mem[2],ymm3[3],mem[3],ymm3[6],mem[6],ymm3[7],mem[7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm5 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm5 = ymm3[3,0],mem[3,0],ymm3[7,4],mem[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm5[2,0],ymm4[2,3],ymm5[6,4],ymm4[6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm5 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm5 = xmm3[2],mem[2],xmm3[3],mem[3]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm6 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm6 = xmm2[3,0],mem[3,0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[2,0,2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm2 = ymm9[2],ymm15[2],ymm9[3],ymm15[3],ymm9[6],ymm15[6],ymm9[7],ymm15[7]
+; AVX1-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm3 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm3 = ymm12[3,0],mem[3,0],ymm12[7,4],mem[7,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm3[2,0],ymm2[2,3],ymm3[6,4],ymm2[6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm3[2],xmm13[2],xmm3[3],xmm13[3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm14[3,0],xmm8[3,0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[2,0,2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, (%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 32(%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 64(%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 96(%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 96(%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, (%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 32(%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 64(%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 96(%rcx)
+; AVX1-NEXT:    vmovaps %ymm7, (%rcx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 32(%rcx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 64(%rcx)
+; AVX1-NEXT:    vmovaps %ymm4, 64(%r8)
 ; AVX1-NEXT:    vmovaps %ymm1, 96(%r8)
-; AVX1-NEXT:    vmovaps %ymm3, (%r8)
+; AVX1-NEXT:    vmovaps %ymm2, (%r8)
 ; AVX1-NEXT:    vmovaps %ymm0, 32(%r8)
-; AVX1-NEXT:    addq $744, %rsp # imm = 0x2E8
+; AVX1-NEXT:    addq $1000, %rsp # imm = 0x3E8
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index 2a48147bbb399..e8d19d56f3c75 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -483,81 +483,75 @@ define void @load_i32_stride6_vf8(<48 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
 ; AVX1-LABEL: load_i32_stride6_vf8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovaps 128(%rdi), %ymm11
-; AVX1-NEXT:    vmovaps 160(%rdi), %ymm12
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm10
+; AVX1-NEXT:    vmovaps 160(%rdi), %ymm4
 ; AVX1-NEXT:    vmovaps 32(%rdi), %ymm9
-; AVX1-NEXT:    vmovaps (%rdi), %ymm10
+; AVX1-NEXT:    vmovaps (%rdi), %ymm7
 ; AVX1-NEXT:    vmovaps 96(%rdi), %ymm0
 ; AVX1-NEXT:    vmovaps 64(%rdi), %ymm1
-; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm0[2,0],ymm3[0,0],ymm0[6,4],ymm3[4,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm3[2,2],ymm2[6,4],ymm3[6,6]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm9[4,5],ymm10[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm4
-; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm8[0,1],xmm4[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,2],xmm4[0,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3,4,5],ymm5[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1,2,3],ymm11[4,5],ymm12[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm6[0,1],xmm5[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,1,0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3,4,5],ymm7[6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm0[3,0],ymm3[1,0],ymm0[7,4],ymm3[5,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm7[2,0],ymm3[2,3],ymm7[6,4],ymm3[6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm8[1,0],xmm4[3,0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm7[0,2],xmm4[1,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5],ymm4[6,7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = zero,zero,xmm6[1],xmm5[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm3[0,1,2,3,4,5],ymm4[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm5[0,0],xmm6[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm10[2,3],ymm9[4,5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm2
-; AVX1-NEXT:    vshufps {{.*#+}} xmm8 = xmm7[2,0],xmm2[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm9 = ymm0[2,1],ymm1[2,0],ymm0[6,5],ymm1[6,4]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,0,1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4],ymm8[5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm4[5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,1],xmm6[3,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm7[3,1],xmm2[3,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm0[3,1],ymm1[2,1],ymm0[7,5],ymm1[6,5]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm6[2,3,0,1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm6[3,4],ymm2[5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm2[0,1,2,3,4],ymm5[5,6,7]
-; AVX1-NEXT:    vmovdqa 160(%rdi), %xmm5
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],mem[4,5,6,7]
-; AVX1-NEXT:    vmovdqa 176(%rdi), %xmm6
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm7 = xmm6[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm5[0,2],xmm7[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm3
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm3[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm5
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm0[2,0],ymm5[0,0],ymm0[6,4],ymm5[4,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm5[2,2],ymm2[6,4],ymm5[6,6]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3],ymm9[4,5],ymm7[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm6
+; AVX1-NEXT:    vblendps {{.*#+}} xmm3 = xmm8[0,1],xmm6[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5],ymm3[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm10[4,5],ymm4[6,7]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm3[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm11[2,0],ymm3[0,0],ymm11[6,4],ymm3[4,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3,4,5],ymm12[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm0[3,0],ymm5[1,0],ymm0[7,4],ymm5[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm12[2,0],ymm5[2,3],ymm12[6,4],ymm5[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm8[1,0],xmm6[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[1,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3,4,5],ymm2[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm11[3,0],ymm3[1,0],ymm11[7,4],ymm3[5,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1,2,3],ymm4[4,5],ymm10[6,7]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm3[0,0],ymm2[2,0],ymm3[4,4],ymm2[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm7[2,3],ymm9[4,5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm5
+; AVX1-NEXT:    vshufps {{.*#+}} xmm9 = xmm7[2,0],xmm5[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm11 = ymm0[2,1],ymm1[2,0],ymm0[6,5],ymm1[6,4]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm11[2,3,0,1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm11[3,4],ymm9[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm6[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm3[0,1],ymm2[3,1],ymm3[4,5],ymm2[7,5]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm7[3,1],xmm5[3,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[3,1],ymm1[2,1],ymm0[7,5],ymm1[6,5]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,0,1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3,4],ymm3[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm10[2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm3[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm3[2,0],ymm4[0,0],ymm3[6,4],ymm4[4,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm4[0,2],ymm5[2,0],ymm4[4,6],ymm5[6,4]
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm7
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm7[2,2,3,3]
 ; AVX1-NEXT:    vmovaps 16(%rdi), %xmm2
-; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm2[0],xmm4[1],xmm2[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm2[0],xmm6[1],xmm2[2,3]
 ; AVX1-NEXT:    vmovapd 80(%rdi), %xmm10
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm11 = ymm10[1],ymm1[0],ymm10[2],ymm1[2]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,0],ymm0[4,5],ymm11[6,4]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm11[2,3,4,5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm7[5,6,7]
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm6 = xmm6[12,13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,3],xmm6[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3]
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm12 = ymm10[1],ymm1[0],ymm10[2],ymm1[2]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm0[0,1],ymm12[2,0],ymm0[4,5],ymm12[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm12[2,3,4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[3,0],ymm4[1,0],ymm3[7,4],ymm4[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm4[0,3],ymm3[2,0],ymm4[4,7],ymm3[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,3]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[1,3,2,3]
 ; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm10[3,1],ymm1[1,3],ymm10[7,5],ymm1[5,7]
 ; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[2,0],ymm0[5,5],ymm1[6,4]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
 ; AVX1-NEXT:    vmovaps %ymm13, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm14, (%rdx)
-; AVX1-NEXT:    vmovaps %ymm8, (%rcx)
-; AVX1-NEXT:    vmovaps %ymm9, (%r8)
-; AVX1-NEXT:    vmovaps %ymm4, (%r9)
+; AVX1-NEXT:    vmovaps %ymm8, (%rdx)
+; AVX1-NEXT:    vmovaps %ymm9, (%rcx)
+; AVX1-NEXT:    vmovaps %ymm11, (%r8)
+; AVX1-NEXT:    vmovaps %ymm5, (%r9)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rax)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1182,203 +1176,191 @@ define void @load_i32_stride6_vf16(<96 x i32>* %in.vec, <16 x i32>* %out.vec0, <
 ; AVX1-LABEL: load_i32_stride6_vf16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    subq $328, %rsp # imm = 0x148
-; AVX1-NEXT:    vmovaps 96(%rdi), %ymm8
-; AVX1-NEXT:    vmovaps 64(%rdi), %ymm10
-; AVX1-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 320(%rdi), %ymm5
-; AVX1-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 352(%rdi), %ymm6
+; AVX1-NEXT:    vmovaps 96(%rdi), %ymm14
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm6
 ; AVX1-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 224(%rdi), %ymm2
-; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 192(%rdi), %ymm3
+; AVX1-NEXT:    vmovaps 320(%rdi), %ymm2
+; AVX1-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 352(%rdi), %ymm3
 ; AVX1-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 224(%rdi), %ymm4
+; AVX1-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 192(%rdi), %ymm5
+; AVX1-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT:    vmovaps 288(%rdi), %ymm1
 ; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT:    vmovaps 256(%rdi), %ymm0
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vinsertf128 $1, 288(%rdi), %ymm0, %ymm7
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm7[0,0],ymm1[6,4],ymm7[4,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm7[2,2],ymm0[6,4],ymm7[6,6]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm11, %xmm4
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm11[0,1],xmm4[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,3]
+; AVX1-NEXT:    vinsertf128 $1, 288(%rdi), %ymm0, %ymm9
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm9[0,0],ymm1[6,4],ymm9[4,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm9[2,2],ymm0[6,4],ymm9[6,6]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm5[0,1,2,3],ymm4[4,5],ymm5[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm15
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm15[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm15[0,3]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm14, %xmm15
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm15[0,1],xmm14[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,1,0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm6[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm5[2,3,0,1]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm10, %ymm6
-; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm8[2,0],ymm6[0,0],ymm8[6,4],ymm6[4,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm6[2,2],ymm1[6,4],ymm6[6,6]
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm0
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm0[2,0],ymm5[0,0],ymm0[6,4],ymm5[4,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps (%rdi), %ymm2
-; AVX1-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm0
-; AVX1-NEXT:    vblendps {{.*#+}} xmm12 = xmm2[0,1],xmm0[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm12 = xmm12[0,2],xmm0[0,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0,1,2],ymm1[3,4,5],ymm12[6,7]
-; AVX1-NEXT:    vmovaps 128(%rdi), %ymm12
-; AVX1-NEXT:    vmovaps 160(%rdi), %ymm10
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm12[4,5],ymm10[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vblendps {{.*#+}} xmm13 = xmm3[0,1],xmm1[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm13[0,1,0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3,4,5],ymm5[6,7]
-; AVX1-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm8[3,0],ymm6[1,0],ymm8[7,4],ymm6[5,4]
-; AVX1-NEXT:    vmovaps %ymm8, %ymm9
-; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm5[2,0],ymm6[2,3],ymm5[6,4],ymm6[6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[3,0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5],ymm0[6,7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm3[1],xmm1[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm6, %ymm2
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm14[2,0],ymm2[0,0],ymm14[6,4],ymm2[4,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm13 = ymm1[2,0],ymm2[2,2],ymm1[6,4],ymm2[6,6]
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm11
+; AVX1-NEXT:    vmovaps (%rdi), %ymm8
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm11[4,5],ymm8[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm3[0,1],xmm0[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,2],xmm0[0,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm13[3,4,5],ymm7[6,7]
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm6
+; AVX1-NEXT:    vmovaps 160(%rdi), %ymm12
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm6[4,5],ymm12[6,7]
+; AVX1-NEXT:    vmovaps %ymm12, %ymm13
+; AVX1-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm4[2,0],ymm1[0,0],ymm4[6,4],ymm1[4,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm12[6,7]
+; AVX1-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm14[3,0],ymm2[1,0],ymm14[7,4],ymm2[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm7[2,0],ymm2[2,3],ymm7[6,4],ymm2[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[0,2],xmm0[1,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm4[3,0],ymm1[1,0],ymm4[7,4],ymm1[5,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm8[3,0],ymm7[1,0],ymm8[7,4],ymm7[5,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm7[2,3],ymm0[6,4],ymm7[6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm11[1,0],xmm4[3,0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[1,3]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm12[3,0],ymm9[1,0],ymm12[7,4],ymm9[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm9[2,3],ymm0[6,4],ymm9[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm10[1,0],xmm15[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm15[1,3]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm15[1],xmm14[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm5[1,0],ymm1[7,4],ymm5[5,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm10[4,5],ymm12[6,7]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vblendps $12, (%rsp), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm12
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm9, %ymm14
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm9[2,1],ymm13[2,0],ymm9[6,5],ymm13[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm11[0,1],ymm8[2,3],ymm11[4,5,6,7]
+; AVX1-NEXT:    vmovaps %ymm14, %ymm9
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm14[2,1],ymm10[2,0],ymm14[6,5],ymm10[6,4]
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm2[2,0],xmm12[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4],ymm4[5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm1[0,0],xmm4[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[2,0],xmm2[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4],ymm3[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm13[4,5],ymm6[6,7]
+; AVX1-NEXT:    vmovaps %ymm6, %ymm14
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm3[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm4[0,0],ymm3[2,0],ymm4[4,4],ymm3[6,4]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
-; AVX1-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm9 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm8[2,1],ymm11[2,0],ymm8[6,5],ymm11[6,4]
+; AVX1-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm5 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm12[2,1],ymm15[2,0],ymm12[6,5],ymm15[6,4]
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm6
-; AVX1-NEXT:    vshufps {{.*#+}} xmm10 = xmm9[2,0],xmm6[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4],ymm10[5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm7
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm5[0,0],xmm7[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm5[2,0],xmm6[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3,4],ymm7[5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX1-NEXT:    vblendps $207, (%rsp), %ymm13, %ymm7 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm7 = mem[0,1,2,3],ymm13[4,5],mem[6,7]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm11 = ymm8[0,0],ymm7[2,0],ymm8[4,4],ymm7[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm11[5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1],xmm4[3,1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[3,1],xmm12[3,3]
-; AVX1-NEXT:    vmovaps %ymm14, %ymm0
-; AVX1-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm14[3,1],ymm13[2,1],ymm14[7,5],ymm13[6,5]
-; AVX1-NEXT:    vmovaps %ymm13, %ymm14
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4],ymm2[5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
-; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm5[0,1],xmm7[3,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm9[3,1],xmm6[3,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm8[3,1],ymm11[2,1],ymm8[7,5],ymm11[6,5]
-; AVX1-NEXT:    vmovaps %ymm11, %ymm12
-; AVX1-NEXT:    vmovaps %ymm8, %ymm10
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[3,1],ymm4[4,5],ymm3[7,5]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[3,1],xmm2[3,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm9[3,1],ymm10[2,1],ymm9[7,5],ymm10[6,5]
+; AVX1-NEXT:    vmovaps %ymm10, %ymm11
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm3[5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm8[0,1],ymm7[3,1],ymm8[4,5],ymm7[7,5]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm5[3,1],xmm6[3,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm12[3,1],ymm15[2,1],ymm12[7,5],ymm15[6,5]
+; AVX1-NEXT:    vmovaps %ymm12, %ymm10
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,0,1]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4],ymm3[5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm2[5,6,7]
-; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm9
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm9[2,2,3,3]
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm8
-; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm8[0],xmm4[1],xmm8[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm2 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm2 = mem[0,1],ymm14[2,3],mem[4,5,6,7]
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm14
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm14[2,2,3,3]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm5
+; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2,3]
 ; AVX1-NEXT:    vmovapd 80(%rdi), %xmm6
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm7 = ymm6[1],ymm13[0],ymm6[2],ymm13[2]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm0[0,1],ymm7[2,0],ymm0[4,5],ymm7[6,4]
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm7 = ymm6[1],ymm11[0],ymm6[2],ymm11[2]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm9[0,1],ymm7[2,0],ymm9[4,5],ymm7[6,4]
+; AVX1-NEXT:    vmovaps %ymm9, %ymm12
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3,4,5,6,7]
-; AVX1-NEXT:    vmovdqa 160(%rdi), %xmm7
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],mem[4,5,6,7]
-; AVX1-NEXT:    vmovdqa 176(%rdi), %xmm0
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm7[0,2],xmm1[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
-; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 224(%rdi), %xmm1
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm1[2,2,3,3]
-; AVX1-NEXT:    vmovaps 208(%rdi), %xmm2
-; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm2[0],xmm4[1],xmm2[2,3]
-; AVX1-NEXT:    vmovapd 272(%rdi), %xmm15
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm13 = ymm15[1],ymm11[0],ymm15[2],ymm11[2]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm13 = ymm10[0,1],ymm13[2,0],ymm10[4,5],ymm13[6,4]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm4[0,1],ymm13[2,3,4,5,6,7]
-; AVX1-NEXT:    vmovdqa 352(%rdi), %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],mem[4,5,6,7]
-; AVX1-NEXT:    vmovdqa 368(%rdi), %xmm4
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm11 = xmm4[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm3[0,2],xmm11[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm13[0,1,2,3,4],ymm5[5,6,7]
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm7[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm7[0,3],xmm0[2,0]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm8[0,1],xmm9[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[3,1],ymm14[1,3],ymm6[7,5],ymm14[5,7]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm8[1,1],ymm6[2,0],ymm8[5,5],ymm6[6,4]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[1,3,2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3,4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm0[5,6,7]
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm4 = xmm4[12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[0,3],xmm4[2,0]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm15[3,1],ymm12[1,3],ymm15[7,5],ymm12[5,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm10[1,1],ymm2[2,0],ymm10[5,5],ymm2[6,4]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, (%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, 32(%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, 32(%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, (%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, 32(%rcx)
-; AVX1-NEXT:    vmovups (%rsp), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, (%rcx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, 32(%r8)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, (%r8)
-; AVX1-NEXT:    vmovaps %ymm5, 32(%r9)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, (%r9)
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm2[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm2[2,0],ymm7[0,0],ymm2[6,4],ymm7[4,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm7[0,2],ymm8[2,0],ymm7[4,6],ymm8[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm8[5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vblendps $12, (%rsp), %ymm13, %ymm4 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm4 = ymm13[0,1],mem[2,3],ymm13[4,5,6,7]
+; AVX1-NEXT:    vmovaps 224(%rdi), %xmm0
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; AVX1-NEXT:    vmovaps 208(%rdi), %xmm3
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3]
+; AVX1-NEXT:    vmovapd 272(%rdi), %xmm8
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm9 = ymm8[1],ymm15[0],ymm8[2],ymm15[2]
+; AVX1-NEXT:    vmovaps %ymm10, %ymm13
+; AVX1-NEXT:    vshufps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,0],ymm10[4,5],ymm9[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm9[2,3,4,5,6,7]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm4[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm10 = ymm4[2,0],ymm9[0,0],ymm4[6,4],ymm9[4,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm10 = ymm9[0,2],ymm10[2,0],ymm9[4,6],ymm10[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm10[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[3,0],ymm7[1,0],ymm2[7,4],ymm7[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm7[0,3],ymm2[2,0],ymm7[4,7],ymm2[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm14[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[3,1],ymm11[1,3],ymm6[7,5],ymm11[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm12[1,1],ymm6[2,0],ymm12[5,5],ymm6[6,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[1,3,2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm6[2,3,4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[3,0],ymm9[1,0],ymm4[7,4],ymm9[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm9[0,3],ymm4[2,0],ymm9[4,7],ymm4[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm3[0,1],xmm0[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm8[3,1],ymm15[1,3],ymm8[7,5],ymm15[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm13[1,1],ymm3[2,0],ymm13[5,5],ymm3[6,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, (%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 32(%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 32(%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, (%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 32(%rcx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, (%rcx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, 32(%r8)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm3, (%r8)
+; AVX1-NEXT:    vmovaps %ymm1, 32(%r9)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm1, (%r9)
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovaps %ymm1, 32(%rax)
-; AVX1-NEXT:    vmovaps %ymm0, (%rax)
+; AVX1-NEXT:    vmovaps %ymm0, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm2, (%rax)
 ; AVX1-NEXT:    addq $328, %rsp # imm = 0x148
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
index 806e990911e91..36c95807e44d5 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
@@ -206,40 +206,38 @@ define void @store_i32_stride6_vf4(<4 x i32>* %in.vecptr0, <4 x i32>* %in.vecptr
 ; AVX1-LABEL: store_i32_stride6_vf4:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovaps (%rdi), %xmm2
-; AVX1-NEXT:    vmovaps (%rsi), %xmm3
-; AVX1-NEXT:    vmovaps (%rdx), %xmm0
-; AVX1-NEXT:    vmovaps (%rcx), %xmm1
+; AVX1-NEXT:    vmovaps (%rdi), %xmm0
+; AVX1-NEXT:    vmovaps (%rsi), %xmm1
+; AVX1-NEXT:    vmovaps (%rdx), %xmm2
+; AVX1-NEXT:    vmovaps (%rcx), %xmm3
 ; AVX1-NEXT:    vmovaps (%r8), %xmm4
 ; AVX1-NEXT:    vmovaps (%r9), %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm7
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[2],ymm6[2]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[0,2,3,1,4,6,7,5]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm1[0,0],xmm0[0,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm8
-; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm8[1,2],ymm7[1,2],ymm8[5,6],ymm7[5,6]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[0,2,3,1,4,6,7,5]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm8 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3],ymm3[4,5],ymm7[6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm12
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm7[1],ymm4[3],ymm7[3]
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm8 = ymm7[0],ymm12[0],ymm7[1],ymm12[1],ymm7[4],ymm12[4],ymm7[5],ymm12[5]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm10
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm11 = ymm11[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm3[0,0],xmm2[0,0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm11[0,1],ymm6[2,3],ymm11[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5],ymm6[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm8 = ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[6],ymm10[6],ymm9[7],ymm10[7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm2[1,2],ymm9[1,2],ymm2[5,6],ymm9[5,6]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5],ymm3[6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm12[1],ymm7[1],ymm12[3],ymm7[3]
 ; AVX1-NEXT:    vpermilps {{.*#+}} ymm4 = ymm4[0,2,3,1,4,6,7,5]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[3,0],xmm0[3,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[3,0],ymm9[3,0],ymm2[7,4],ymm9[7,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-NEXT:    vmovaps %ymm0, 64(%rax)
 ; AVX1-NEXT:    vmovaps %ymm3, 32(%rax)
 ; AVX1-NEXT:    vmovaps %ymm6, (%rax)

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index 8012121c3c6a5..63b400162da83 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -345,15 +345,13 @@ define void @PR39483() {
 ; X86-AVX1-LABEL: PR39483:
 ; X86-AVX1:       # %bb.0: # %entry
 ; X86-AVX1-NEXT:    vmovups 32, %ymm0
-; X86-AVX1-NEXT:    vmovups 64, %xmm1
-; X86-AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,3]
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; X86-AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
-; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; X86-AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX1-NEXT:    vmovups 64, %ymm1
+; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; X86-AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[0,3],ymm2[4,5],ymm1[4,7]
+; X86-AVX1-NEXT:    vmovups 16, %xmm2
+; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; X86-AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,0],ymm3[2,0],ymm2[5,4],ymm3[6,4]
+; X86-AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[0,3],ymm2[6,4],ymm0[4,7]
 ; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; X86-AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X86-AVX1-NEXT:    vmulps %ymm1, %ymm0, %ymm0
@@ -388,15 +386,13 @@ define void @PR39483() {
 ; X64-AVX1-LABEL: PR39483:
 ; X64-AVX1:       # %bb.0: # %entry
 ; X64-AVX1-NEXT:    vmovups 32, %ymm0
-; X64-AVX1-NEXT:    vmovups 64, %xmm1
-; X64-AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,3]
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; X64-AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
-; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; X64-AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX1-NEXT:    vmovups 64, %ymm1
+; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; X64-AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[0,3],ymm2[4,5],ymm1[4,7]
+; X64-AVX1-NEXT:    vmovups 16, %xmm2
+; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; X64-AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,0],ymm3[2,0],ymm2[5,4],ymm3[6,4]
+; X64-AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[0,3],ymm2[6,4],ymm0[4,7]
 ; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; X64-AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vmulps %ymm1, %ymm0, %ymm0


        

