[llvm] fd485d8 - [X86][AVX] Prefer VINSERTF128 over VPERM2F128 for 128->256 subvector concatenations

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 1 03:51:08 PDT 2021


Author: Simon Pilgrim
Date: 2021-11-01T10:45:50Z
New Revision: fd485d8cda8d5d432f56d4186adcda305f46d6e2

URL: https://github.com/llvm/llvm-project/commit/fd485d8cda8d5d432f56d4186adcda305f46d6e2
DIFF: https://github.com/llvm/llvm-project/commit/fd485d8cda8d5d432f56d4186adcda305f46d6e2.diff

LOG: [X86][AVX] Prefer VINSERTF128 over VPERM2F128 for 128->256 subvector concatenations

The VINSERTF128 instruction is often much quicker, and never slower, than the more general VPERM2F128 instruction, so we should try to use it in more circumstances.
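For illustration, a typical 128->256 concatenation of the low halves of two vectors now lowers to an insert instead of a lane permute (register numbers here are hypothetical, but the pattern matches the test diffs below):

    # before: general 2x128-bit lane permute
    vperm2f128 $0x20, %ymm1, %ymm0, %ymm2   # ymm2 = ymm0[0,1],ymm1[0,1]
    # after: insert the low 128 bits of ymm1 into the upper half of ymm0
    vinsertf128 $1, %xmm1, %ymm0, %ymm2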

This requires a fallback to a commuted VPERM2F128 for the case where we need to fold the 256-bit vector source instead of the 128-bit subvector source.
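As a rough sketch of that fallback (operands hypothetical): when the 256-bit source is a foldable load and the 128-bit subvector is in a register, VINSERTF128's memory form can't fold it, so the new tablegen pattern widens the subvector and emits VPERM2F128 with the wide load folded and a commuted immediate:

    # insert %xmm2 at subvector index 0 of a 256-bit vector loaded from (%rdi)
    vperm2f128 $0x30, (%rdi), %ymm2, %ymm0  # ymm0 = ymm2[0,1],mem[2,3]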

There is one interesting side effect - DAGCombine's narrowExtractedVectorLoad combine gets called in a number of locations, and this often creates an extracted subvector load without regard to other uses of the original wider load. I'm expecting AVX CPUs to be capable of merging such aliased loads, but I do wonder whether narrowExtractedVectorLoad's call to X86TargetLowering::shouldReduceLoadWidth needs to be altered to check for more partial uses?

Noticed while investigating the quality of interleaved load/store codegen.

Differential Revision: https://reviews.llvm.org/D111960

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
    llvm/lib/Target/X86/X86InstrSSE.td
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
    llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
    llvm/test/CodeGen/X86/x86-interleaved-access.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 6370190a8726c..29bf71ba1f3ed 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -435,6 +435,18 @@ namespace {
       return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
     }
 
+    SDValue getPermuteVINSERTCommutedImmediate(SDNode *N, unsigned VecWidth,
+                                               const SDLoc &DL) {
+      assert(VecWidth == 128 && "Unexpected vector width");
+      uint64_t Index = N->getConstantOperandVal(2);
+      MVT VecVT = N->getSimpleValueType(0);
+      uint64_t InsertIdx = (Index * VecVT.getScalarSizeInBits()) / VecWidth;
+      assert((InsertIdx == 0 || InsertIdx == 1) && "Bad insertf128 index");
+      // vinsert(0,sub,vec) -> [sub0][vec1] -> vperm2x128(0x30,vec,sub)
+      // vinsert(1,sub,vec) -> [vec0][sub0] -> vperm2x128(0x02,vec,sub)
+      return getI8Imm(InsertIdx ? 0x02 : 0x30, DL);
+    }
+
     // Helper to detect unneeded and instructions on shift amounts. Called
     // from PatFrags in tablegen.
     bool isUnneededShiftMask(SDNode *N, unsigned Width) const {

diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9d46892dd20b7..e65a4f9eb1ecd 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -36888,15 +36888,17 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
                             256);
     }
 
-    // If we're splatting the low subvector, an insert-subvector 'concat'
+    // If we're inserting the low subvector, an insert-subvector 'concat'
     // pattern is quicker than VPERM2X128.
     // TODO: Add AVX2 support instead of VPERMQ/VPERMPD.
-    if (Mask[0] == 0 && Mask[1] == 0 && !Subtarget.hasAVX2()) {
+    if (BaseMask[0] == 0 && (BaseMask[1] == 0 || BaseMask[1] == 2) &&
+        !Subtarget.hasAVX2()) {
       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
         return SDValue(); // Nothing to do!
-      Res = CanonicalizeShuffleInput(RootVT, V1);
-      Res = extractSubVector(Res, 0, DAG, DL, 128);
-      return concatSubVectors(Res, Res, DAG, DL);
+      SDValue Lo = CanonicalizeShuffleInput(RootVT, V1);
+      SDValue Hi = CanonicalizeShuffleInput(RootVT, BaseMask[1] == 0 ? V1 : V2);
+      Hi = extractSubVector(Hi, 0, DAG, DL, 128);
+      return insertSubVector(Lo, Hi, NumRootElts / 2, DAG, DL, 128);
     }
 
     if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)

diff  --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 258b2a15d5e4a..166f1f8c3251c 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -1058,6 +1058,12 @@ def INSERT_get_vinsert128_imm : SDNodeXForm<insert_subvector, [{
   return getInsertVINSERTImmediate(N, 128, SDLoc(N));
 }]>;
 
+// INSERT_get_vperm2x128_imm xform function: convert insert_subvector index to
+// commuted VPERM2F128/VPERM2I128 imm.
+def INSERT_get_vperm2x128_commutedimm : SDNodeXForm<insert_subvector, [{
+  return getPermuteVINSERTCommutedImmediate(N, 128, SDLoc(N));
+}]>;
+
 // EXTRACT_get_vextract256_imm xform function: convert extract_subvector index
 // to VEXTRACTF64x4 imm.
 def EXTRACT_get_vextract256_imm : SDNodeXForm<extract_subvector, [{

diff  --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index eeed43089f242..b918b57481063 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -7111,29 +7111,37 @@ let Predicates = [HasAVX1Only] in {
 def : Pat<(v8i32 immAllOnesV), (VCMPPSYrri (AVX_SET0), (AVX_SET0), 0xf)>;
 }
 
-multiclass vinsert_lowering<string InstrStr, ValueType From, ValueType To,
-                            PatFrag memop_frag> {
+multiclass vinsert_lowering<string InstrStr, string PermStr,
+                            ValueType From, ValueType To,
+                            PatFrag frommemop_frag, PatFrag tomemop_frag> {
   def : Pat<(vinsert128_insert:$ins (To VR256:$src1), (From VR128:$src2),
                                    (iPTR imm)),
             (!cast<Instruction>(InstrStr#rr) VR256:$src1, VR128:$src2,
                                        (INSERT_get_vinsert128_imm VR256:$ins))>;
   def : Pat<(vinsert128_insert:$ins (To VR256:$src1),
-                                    (From (memop_frag addr:$src2)),
+                                    (From (frommemop_frag addr:$src2)),
                                     (iPTR imm)),
             (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2,
                                        (INSERT_get_vinsert128_imm VR256:$ins))>;
+  // Folding "To" vector - convert to perm2x128 and commute inputs.
+  def : Pat<(vinsert128_insert:$ins (To (tomemop_frag addr:$src1)),
+                                    (From VR128:$src2),
+                                    (iPTR imm)),
+            (!cast<Instruction>(PermStr#rm)
+              (INSERT_SUBREG (To (IMPLICIT_DEF)), VR128:$src2, sub_xmm),
+              addr:$src1, (INSERT_get_vperm2x128_commutedimm VR256:$ins))>;
 }
 
 let Predicates = [HasAVX, NoVLX] in {
-  defm : vinsert_lowering<"VINSERTF128", v4f32, v8f32, loadv4f32>;
-  defm : vinsert_lowering<"VINSERTF128", v2f64, v4f64, loadv2f64>;
+  defm : vinsert_lowering<"VINSERTF128", "VPERM2F128", v4f32, v8f32, loadv4f32, loadv8f32>;
+  defm : vinsert_lowering<"VINSERTF128", "VPERM2F128", v2f64, v4f64, loadv2f64, loadv4f64>;
 }
 
 let Predicates = [HasAVX1Only] in {
-  defm : vinsert_lowering<"VINSERTF128", v2i64, v4i64,  loadv2i64>;
-  defm : vinsert_lowering<"VINSERTF128", v4i32, v8i32,  loadv4i32>;
-  defm : vinsert_lowering<"VINSERTF128", v8i16, v16i16, loadv8i16>;
-  defm : vinsert_lowering<"VINSERTF128", v16i8, v32i8,  loadv16i8>;
+  defm : vinsert_lowering<"VINSERTF128", "VPERM2F128", v2i64, v4i64,  loadv2i64, loadv4i64>;
+  defm : vinsert_lowering<"VINSERTF128", "VPERM2F128", v4i32, v8i32,  loadv4i32, loadv8i32>;
+  defm : vinsert_lowering<"VINSERTF128", "VPERM2F128", v8i16, v16i16, loadv8i16, loadv16i16>;
+  defm : vinsert_lowering<"VINSERTF128", "VPERM2F128", v16i8, v32i8,  loadv16i8, loadv32i8>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -7742,10 +7750,10 @@ def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
 }
 
 let Predicates = [HasAVX2, NoVLX] in {
-  defm : vinsert_lowering<"VINSERTI128", v2i64, v4i64,  loadv2i64>;
-  defm : vinsert_lowering<"VINSERTI128", v4i32, v8i32,  loadv4i32>;
-  defm : vinsert_lowering<"VINSERTI128", v8i16, v16i16, loadv8i16>;
-  defm : vinsert_lowering<"VINSERTI128", v16i8, v32i8,  loadv16i8>;
+  defm : vinsert_lowering<"VINSERTI128", "VPERM2I128", v2i64, v4i64,  loadv2i64,  loadv4i64>;
+  defm : vinsert_lowering<"VINSERTI128", "VPERM2I128", v4i32, v8i32,  loadv4i32,  loadv8i32>;
+  defm : vinsert_lowering<"VINSERTI128", "VPERM2I128", v8i16, v16i16, loadv8i16,  loadv16i16>;
+  defm : vinsert_lowering<"VINSERTI128", "VPERM2I128", v16i8, v32i8,  loadv16i8,  loadv32i8>;
 }
 
 //===----------------------------------------------------------------------===//

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
index 4b3e462a5cc9e..10ca445ef7e4c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
@@ -543,99 +543,99 @@ define void @vf16(<64 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.v
 ;
 ; AVX1-LABEL: vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vmovdqa 112(%rdi), %xmm12
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0],xmm1[1,2,3],xmm12[4],xmm1[5,6,7]
-; AVX1-NEXT:    vmovdqa 96(%rdi), %xmm4
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0],xmm1[1,2,3],xmm4[4],xmm1[5,6,7]
-; AVX1-NEXT:    vpackusdw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0],xmm1[1,2,3],xmm5[4],xmm1[5,6,7]
-; AVX1-NEXT:    vmovdqa 64(%rdi), %xmm6
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm6[0],xmm1[1,2,3],xmm6[4],xmm1[5,6,7]
-; AVX1-NEXT:    vpackusdw %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm0, %xmm2, %xmm8
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm10
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm11
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa 112(%rdi), %xmm5
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm2[1,2,3],xmm5[4],xmm2[5,6,7]
+; AVX1-NEXT:    vmovdqa 96(%rdi), %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm2[1,2,3],xmm6[4],xmm2[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa 80(%rdi), %xmm7
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm2[1,2,3],xmm7[4],xmm2[5,6,7]
+; AVX1-NEXT:    vmovdqa 64(%rdi), %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm11
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm12
 ; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm13
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm13[0],xmm1[1,2,3],xmm13[4],xmm1[5,6,7]
-; AVX1-NEXT:    vpackusdw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0],xmm1[1,2,3],xmm11[4],xmm1[5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0],xmm1[1,2,3],xmm10[4],xmm1[5,6,7]
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm8[0,1]
-; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm12[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm13[0],xmm2[1,2,3],xmm13[4],xmm2[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm12[0],xmm2[1,2,3],xmm12[4],xmm2[5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm11[0],xmm2[1,2,3],xmm11[4],xmm2[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm9
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm9
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm13[0,2,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[0,2,2,3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
 ; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm11[0,2,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm7[0,2,2,3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm10[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm9[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm14 = xmm12[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[0,1,2,0,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm15 = xmm4[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm15[0,1,2,0,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm5[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm8[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm12
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm10
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm13[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm12[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm11[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm2[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm15 = xmm5[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm15[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm6[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm8[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[2,0,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm0[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm14
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm1[0,1,2,0,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm13[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm7[0,1,2,0,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm11[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm7[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm12[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm11[3,1,2,3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm12[4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[0,1,3,1,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm15[0,1,3,1,4,5,6,7]
 ; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm8[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,1,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm7[0,1,3,1,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm13[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm15[0,1,3,1,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm8[0,1,3,1,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[3,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm7[0,1,3,1,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm9, (%rdx)
-; AVX1-NEXT:    vmovaps %ymm1, (%rcx)
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovdqa %xmm9, (%rsi)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT:    vmovaps %xmm1, 16(%rsi)
+; AVX1-NEXT:    vmovaps %ymm10, (%rdx)
+; AVX1-NEXT:    vmovaps %ymm11, (%rcx)
 ; AVX1-NEXT:    vmovaps %ymm0, (%r8)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1205,184 +1205,179 @@ define void @vf32(<128 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
 ;
 ; AVX1-LABEL: vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $232, %rsp
-; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa 112(%rdi), %xmm14
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm0[1,2,3],xmm14[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa 96(%rdi), %xmm10
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0],xmm0[1,2,3],xmm10[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vmovdqa 80(%rdi), %xmm9
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm9[0],xmm0[1,2,3],xmm9[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm0[1,2,3],xmm3[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpackusdw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm3
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm5
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm8
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm7
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm0[1,2,3],xmm7[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0],xmm0[1,2,3],xmm8[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpackusdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm0[1,2,3],xmm5[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm1, %xmm11
-; AVX1-NEXT:    vpackusdw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpackusdw %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[0,1],ymm3[0,1]
-; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    subq $200, %rsp
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vmovdqa 240(%rdi), %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; AVX1-NEXT:    vmovdqa %xmm1, %xmm12
 ; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa 224(%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 224(%rdi), %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; AVX1-NEXT:    vmovdqa %xmm2, %xmm14
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vmovdqa 208(%rdi), %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; AVX1-NEXT:    vmovdqa %xmm2, %xmm15
+; AVX1-NEXT:    vmovdqa 192(%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa 176(%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-NEXT:    vmovdqa 160(%rdi), %xmm1
 ; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpackusdw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vmovdqa 208(%rdi), %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vmovdqa 144(%rdi), %xmm1
 ; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa 192(%rdi), %xmm13
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm13[0],xmm0[1,2,3],xmm13[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpackusdw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vmovdqa 176(%rdi), %xmm5
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0],xmm0[1,2,3],xmm5[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa 160(%rdi), %xmm15
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm15[0],xmm0[1,2,3],xmm15[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vmovdqa 144(%rdi), %xmm12
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm12[0],xmm0[1,2,3],xmm12[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa 128(%rdi), %xmm6
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0],xmm0[1,2,3],xmm6[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm6, (%rsp) # 16-byte Spill
-; AVX1-NEXT:    vpackusdw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm2[0,1]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm14[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm10[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm8[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm11[0,2,2,3]
-; AVX1-NEXT:    vmovdqa %xmm11, %xmm8
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm0 = mem[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; AVX1-NEXT:    vmovdqa 128(%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa 112(%rdi), %xmm8
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0],xmm4[1,2,3],xmm8[4],xmm4[5,6,7]
+; AVX1-NEXT:    vmovdqa 96(%rdi), %xmm5
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm4[1,2,3],xmm5[4],xmm4[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vmovdqa 80(%rdi), %xmm9
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0],xmm4[1,2,3],xmm9[4],xmm4[5,6,7]
+; AVX1-NEXT:    vmovdqa 64(%rdi), %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm10
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm10[0],xmm4[1,2,3],xmm10[4],xmm4[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm7, %xmm13
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm11
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm11[0],xmm4[1,2,3],xmm11[4],xmm4[5,6,7]
+; AVX1-NEXT:    vpackusdw %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vpackusdw %xmm13, %xmm4, %xmm2
+; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm8[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm9[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm10[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm11[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm13[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[0,2,2,3]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm12[0,2,2,3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm15[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm12[0,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[0,2,2,3]
+; AVX1-NEXT:    vmovdqa %xmm14, %xmm12
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm14[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX1-NEXT:    vmovdqa %xmm15, %xmm14
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm15[0,2,2,3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm14[3,1,2,3]
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm2 = mem[3,1,2,3]
+; AVX1-NEXT:    vmovdqa (%rsp), %xmm15 # 16-byte Reload
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm15[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm13[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm6 = mem[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX1-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm6 = mem[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm7 = mem[0,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[3,1,2,3]
 ; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm9[3,1,2,3]
+; AVX1-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[3,1,2,3]
+; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm9 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[3,1,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm10[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm8[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[0,1,2,0,4,5,6,7]
 ; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm1 = mem[3,1,2,3]
-; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[3,1,2,3]
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm1[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[3,1,2,3]
-; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm9 = mem[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,1,2,0,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,1,2,0,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm12 = xmm10[3,1,2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm8[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[3,1,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm11[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm4[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
 ; AVX1-NEXT:    # xmm10 = mem[3,1,2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm11[3,1,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm11 = xmm12[3,1,2,3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[0,1,2,0,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[0,1,2,0,4,5,6,7]
 ; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm5 = mem[3,1,2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm13[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm14 = mem[3,1,2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm14[0,1,2,0,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[0,1,2,0,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm4 = mem[3,1,2,3]
-; AVX1-NEXT:    vpshufd $231, (%rsp), %xmm2 # 16-byte Folded Reload
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm12 = xmm14[3,1,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm9 = xmm15[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm9[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[3,1,2,3]
+; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm15 = mem[3,1,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm14[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[0,1,2,0,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm3 = mem[3,1,2,3]
+; AVX1-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
 ; AVX1-NEXT:    # xmm2 = mem[3,1,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm2[2,0,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm3[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm2[2,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
 ; AVX1-NEXT:    # xmm0 = mem[0,1,3,1,4,5,6,7]
 ; AVX1-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -1390,49 +1385,53 @@ define void @vf32(<128 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
 ; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; AVX1-NEXT:    vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
 ; AVX1-NEXT:    # xmm1 = mem[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm3 = mem[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; AVX1-NEXT:    vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm7 = mem[3,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,1,3,1,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm9[0,1,3,1,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm12[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[0,1,3,1,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,1,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm10[0,1,3,1,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[0,1,3,1,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm13[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm14[0,1,3,1,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm11[0,1,3,1,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm12[3,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm9[3,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm14[0,1,3,1,4,5,6,7]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm15[0,1,3,1,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, 32(%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm2, (%rsi)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vmovaps %xmm2, (%rsi)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vmovaps %xmm2, 16(%rsi)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vmovaps %xmm2, 32(%rsi)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vmovaps %xmm2, 48(%rsi)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm2, 32(%rdx)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm2, (%rdx)
-; AVX1-NEXT:    vmovaps %ymm11, 32(%rcx)
+; AVX1-NEXT:    vmovaps %ymm13, 32(%rcx)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm2, (%rcx)
 ; AVX1-NEXT:    vmovaps %ymm1, 32(%r8)
 ; AVX1-NEXT:    vmovaps %ymm0, (%r8)
-; AVX1-NEXT:    addq $232, %rsp
+; AVX1-NEXT:    addq $200, %rsp
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
index 823b2d70a7031..5dcf66c08a1bd 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
@@ -110,12 +110,11 @@ define void @load_i32_stride2_vf8(<16 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
 ; AVX1-LABEL: load_i32_stride2_vf8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm2[1,3],ymm0[5,7],ymm2[5,7]
-; AVX1-NEXT:    vmovaps %ymm1, (%rsi)
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
+; AVX1-NEXT:    vmovaps %ymm2, (%rsi)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rdx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -190,20 +189,18 @@ define void @load_i32_stride2_vf16(<32 x i32>* %in.vec, <16 x i32>* %out.vec0, <
 ; AVX1-LABEL: load_i32_stride2_vf16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX1-NEXT:    vmovaps 64(%rdi), %ymm2
-; AVX1-NEXT:    vmovaps 96(%rdi), %ymm3
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm0[0,2],ymm4[0,2],ymm0[4,6],ymm4[4,6]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm2[2,3],ymm3[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[0,1],ymm3[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm2[0,2],ymm5[0,2],ymm2[4,6],ymm5[4,6]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm4[1,3],ymm0[5,7],ymm4[5,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm5[1,3],ymm2[5,7],ymm5[5,7]
-; AVX1-NEXT:    vmovaps %ymm3, 32(%rsi)
-; AVX1-NEXT:    vmovaps %ymm1, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm2, 32(%rdx)
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm1
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm1
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm1[0,2],ymm4[0,2],ymm1[4,6],ymm4[4,6]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm2[1,3],ymm0[5,7],ymm2[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,3],ymm4[1,3],ymm1[5,7],ymm4[5,7]
+; AVX1-NEXT:    vmovaps %ymm5, 32(%rsi)
+; AVX1-NEXT:    vmovaps %ymm3, (%rsi)
+; AVX1-NEXT:    vmovaps %ymm1, 32(%rdx)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rdx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -318,37 +315,33 @@ define void @load_i32_stride2_vf32(<64 x i32>* %in.vec, <32 x i32>* %out.vec0, <
 ; AVX1-LABEL: load_i32_stride2_vf32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX1-NEXT:    vmovaps 64(%rdi), %ymm2
-; AVX1-NEXT:    vmovaps 96(%rdi), %ymm3
-; AVX1-NEXT:    vmovaps 224(%rdi), %ymm4
-; AVX1-NEXT:    vmovaps 192(%rdi), %ymm5
-; AVX1-NEXT:    vmovaps 160(%rdi), %ymm6
-; AVX1-NEXT:    vmovaps 128(%rdi), %ymm7
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[2,3],ymm6[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm7[0,1],ymm6[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm6[0,2],ymm8[0,2],ymm6[4,6],ymm8[4,6]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm5[2,3],ymm4[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm5[0,1],ymm4[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm4[0,2],ymm9[0,2],ymm4[4,6],ymm9[4,6]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm2[2,3],ymm3[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[0,1],ymm3[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm2[0,2],ymm10[0,2],ymm2[4,6],ymm10[4,6]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm0[2,3],ymm1[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm0[0,2],ymm11[0,2],ymm0[4,6],ymm11[4,6]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[1,3],ymm9[1,3],ymm4[5,7],ymm9[5,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[1,3],ymm8[1,3],ymm6[5,7],ymm8[5,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm10[1,3],ymm2[5,7],ymm10[5,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm11[1,3],ymm0[5,7],ymm11[5,7]
-; AVX1-NEXT:    vmovaps %ymm5, 96(%rsi)
-; AVX1-NEXT:    vmovaps %ymm1, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm3, 32(%rsi)
-; AVX1-NEXT:    vmovaps %ymm7, 64(%rsi)
-; AVX1-NEXT:    vmovaps %ymm6, 64(%rdx)
-; AVX1-NEXT:    vmovaps %ymm4, 96(%rdx)
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm1
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm2
+; AVX1-NEXT:    vmovaps 192(%rdi), %ymm3
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 160(%rdi), %ymm2, %ymm2
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm1
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm1[0,2],ymm6[0,2],ymm1[4,6],ymm6[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vshufps {{.*#+}} ymm9 = ymm0[0,2],ymm8[0,2],ymm0[4,6],ymm8[4,6]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm3[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 224(%rdi), %ymm3, %ymm3
+; AVX1-NEXT:    vshufps {{.*#+}} ymm11 = ymm3[0,2],ymm10[0,2],ymm3[4,6],ymm10[4,6]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,3],ymm6[1,3],ymm1[5,7],ymm6[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm8[1,3],ymm0[5,7],ymm8[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[1,3],ymm10[1,3],ymm3[5,7],ymm10[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm4[1,3],ymm2[5,7],ymm4[5,7]
+; AVX1-NEXT:    vmovaps %ymm11, 96(%rsi)
+; AVX1-NEXT:    vmovaps %ymm9, (%rsi)
+; AVX1-NEXT:    vmovaps %ymm7, 32(%rsi)
+; AVX1-NEXT:    vmovaps %ymm5, 64(%rsi)
+; AVX1-NEXT:    vmovaps %ymm2, 64(%rdx)
+; AVX1-NEXT:    vmovaps %ymm3, 96(%rdx)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rdx)
-; AVX1-NEXT:    vmovaps %ymm2, 32(%rdx)
+; AVX1-NEXT:    vmovaps %ymm1, 32(%rdx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index ea4a61fd5852a..2a48147bbb399 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -483,52 +483,52 @@ define void @load_i32_stride6_vf8(<48 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
 ; AVX1-LABEL: load_i32_stride6_vf8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm11
+; AVX1-NEXT:    vmovaps 160(%rdi), %ymm12
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm9
+; AVX1-NEXT:    vmovaps (%rdi), %ymm10
 ; AVX1-NEXT:    vmovaps 96(%rdi), %ymm0
 ; AVX1-NEXT:    vmovaps 64(%rdi), %ymm1
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm9
-; AVX1-NEXT:    vmovaps (%rdi), %ymm11
-; AVX1-NEXT:    vmovaps 128(%rdi), %ymm6
-; AVX1-NEXT:    vmovaps 160(%rdi), %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm2[0,1],xmm3[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[0,1,0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm8
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm9[4,5],ymm11[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm5
-; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm10[0,1],xmm5[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm1[0,1],ymm0[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm13 = ymm0[2,0],ymm12[0,0],ymm0[6,4],ymm12[4,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm13 = ymm13[2,0],ymm1[2,2],ymm13[6,4],ymm1[6,6]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm13[3,4,5],ymm4[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm4[0,1,2,3,4,5],ymm8[6,7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm2[1],xmm3[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm10[1,0],xmm5[3,0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm5[1,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm0[3,0],ymm12[1,0],ymm0[7,4],ymm12[5,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[2,0],ymm1[2,3],ymm4[6,4],ymm1[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5],ymm3[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm2[0,0],xmm5[2,0]
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm0[2,0],ymm3[0,0],ymm0[6,4],ymm3[4,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm3[2,2],ymm2[6,4],ymm3[6,6]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm9[4,5],ymm10[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm4
+; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm8[0,1],xmm4[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,2],xmm4[0,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3,4,5],ymm5[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1,2,3],ymm11[4,5],ymm12[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm6[0,1],xmm5[2,3]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,1,0,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3,4,5],ymm7[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm0[3,0],ymm3[1,0],ymm0[7,4],ymm3[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm7[2,0],ymm3[2,3],ymm7[6,4],ymm3[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm8[1,0],xmm4[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm7[0,2],xmm4[1,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5],ymm4[6,7]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = zero,zero,xmm6[1],xmm5[3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm11[2,3],ymm9[4,5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vshufps {{.*#+}} xmm9 = xmm6[2,0],xmm7[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm10 = ymm0[2,1],ymm1[2,0],ymm0[6,5],ymm1[6,4]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm10[2,3,0,1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3,4],ymm9[5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm4[5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,1],xmm5[3,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm6[3,1],xmm7[3,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm3[0,1,2,3,4,5],ymm4[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm5[0,0],xmm6[2,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm10[2,3],ymm9[4,5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm2
+; AVX1-NEXT:    vshufps {{.*#+}} xmm8 = xmm7[2,0],xmm2[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm9 = ymm0[2,1],ymm1[2,0],ymm0[6,5],ymm1[6,4]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,0,1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4],ymm8[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm4[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,1],xmm6[3,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm7[3,1],xmm2[3,3]
 ; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm0[3,1],ymm1[2,1],ymm0[7,5],ymm1[6,5]
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm6[2,3,0,1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3,4],ymm5[5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm5[0,1,2,3,4],ymm2[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm6[3,4],ymm2[5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm2[0,1,2,3,4],ymm5[5,6,7]
 ; AVX1-NEXT:    vmovdqa 160(%rdi), %xmm5
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],mem[4,5,6,7]
 ; AVX1-NEXT:    vmovdqa 176(%rdi), %xmm6
@@ -539,24 +539,24 @@ define void @load_i32_stride6_vf8(<48 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm3[2,2,3,3]
 ; AVX1-NEXT:    vmovaps 16(%rdi), %xmm2
 ; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm2[0],xmm4[1],xmm2[2,3]
-; AVX1-NEXT:    vmovapd 80(%rdi), %xmm11
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm13 = ymm11[1],ymm1[0],ymm11[2],ymm1[2]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm13 = ymm0[0,1],ymm13[2,0],ymm0[4,5],ymm13[6,4]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm13[2,3,4,5,6,7]
+; AVX1-NEXT:    vmovapd 80(%rdi), %xmm10
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm11 = ymm10[1],ymm1[0],ymm10[2],ymm1[2]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,0],ymm0[4,5],ymm11[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm11[2,3,4,5,6,7]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm7[5,6,7]
 ; AVX1-NEXT:    vpalignr {{.*#+}} xmm6 = xmm6[12,13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10,11]
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,3],xmm6[2,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
 ; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm11[3,1],ymm1[1,3],ymm11[7,5],ymm1[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm10[3,1],ymm1[1,3],ymm10[7,5],ymm1[5,7]
 ; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,1],ymm1[2,0],ymm0[5,5],ymm1[6,4]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
-; AVX1-NEXT:    vmovaps %ymm8, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm12, (%rdx)
-; AVX1-NEXT:    vmovaps %ymm9, (%rcx)
-; AVX1-NEXT:    vmovaps %ymm10, (%r8)
+; AVX1-NEXT:    vmovaps %ymm13, (%rsi)
+; AVX1-NEXT:    vmovaps %ymm14, (%rdx)
+; AVX1-NEXT:    vmovaps %ymm8, (%rcx)
+; AVX1-NEXT:    vmovaps %ymm9, (%r8)
 ; AVX1-NEXT:    vmovaps %ymm4, (%r9)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rax)
 ; AVX1-NEXT:    vzeroupper
@@ -1181,201 +1181,205 @@ define void @load_i32_stride6_vf16(<96 x i32>* %in.vec, <16 x i32>* %out.vec0, <
 ;
 ; AVX1-LABEL: load_i32_stride6_vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $264, %rsp # imm = 0x108
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm7
-; AVX1-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps (%rdi), %ymm8
-; AVX1-NEXT:    vmovups %ymm8, (%rsp) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 288(%rdi), %ymm2
-; AVX1-NEXT:    vmovaps 256(%rdi), %ymm3
-; AVX1-NEXT:    vmovaps 224(%rdi), %ymm1
-; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 192(%rdi), %ymm4
-; AVX1-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 320(%rdi), %ymm0
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 352(%rdi), %ymm5
+; AVX1-NEXT:    subq $328, %rsp # imm = 0x148
+; AVX1-NEXT:    vmovaps 96(%rdi), %ymm8
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm10
+; AVX1-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 320(%rdi), %ymm5
 ; AVX1-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm5[0,1,2,3],ymm0[4,5],ymm5[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm0
-; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm10[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm15 = ymm4[0,1,2,3],ymm1[4,5],ymm4[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm15, %xmm4
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm15[0,1],xmm4[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm1[0,2],xmm4[0,3]
+; AVX1-NEXT:    vmovaps 352(%rdi), %ymm6
+; AVX1-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 224(%rdi), %ymm2
 ; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 192(%rdi), %ymm3
 ; AVX1-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm3[0,1],ymm2[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm14 = ymm2[2,0],ymm13[0,0],ymm2[6,4],ymm13[4,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm3[2,2],ymm14[6,4],ymm3[6,6]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm14[3,4,5],ymm6[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm5[6,7]
+; AVX1-NEXT:    vmovaps 288(%rdi), %ymm1
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 256(%rdi), %ymm0
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm14, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm14[0,1],xmm1[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm5[0,2],xmm1[0,3]
-; AVX1-NEXT:    vmovaps 96(%rdi), %ymm9
-; AVX1-NEXT:    vmovaps 64(%rdi), %ymm8
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm8[0,1],ymm9[0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm9[2,0],ymm3[0,0],ymm9[6,4],ymm3[4,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[2,0],ymm8[2,2],ymm12[6,4],ymm8[6,6]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm0[0,1,2],ymm12[3,4,5],ymm0[6,7]
+; AVX1-NEXT:    vinsertf128 $1, 288(%rdi), %ymm0, %ymm7
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm7[0,0],ymm1[6,4],ymm7[4,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm7[2,2],ymm0[6,4],ymm7[6,6]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm11, %xmm4
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm11[0,1],xmm4[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm14, %xmm15
+; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm15[0,1],xmm14[2,3]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,1,0,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm6[6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm10, %ymm6
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm8[2,0],ymm6[0,0],ymm8[6,4],ymm6[4,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm6[2,2],ymm1[6,4],ymm6[6,6]
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm0
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps (%rdi), %ymm2
+; AVX1-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm0
+; AVX1-NEXT:    vblendps {{.*#+}} xmm12 = xmm2[0,1],xmm0[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm12 = xmm12[0,2],xmm0[0,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0,1,2],ymm1[3,4,5],ymm12[6,7]
 ; AVX1-NEXT:    vmovaps 128(%rdi), %ymm12
-; AVX1-NEXT:    vmovaps 160(%rdi), %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm12[4,5],ymm6[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vblendps {{.*#+}} xmm11 = xmm2[0,1],xmm0[2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm11[0,1,0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm7[6,7]
+; AVX1-NEXT:    vmovaps 160(%rdi), %ymm10
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm12[4,5],ymm10[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vblendps {{.*#+}} xmm13 = xmm3[0,1],xmm1[2,3]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm13[0,1,0,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3,4,5],ymm5[6,7]
 ; AVX1-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm2[1],xmm0[3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm14[1,0],xmm1[3,0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[0,2],xmm1[1,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm9[3,0],ymm3[1,0],ymm9[7,4],ymm3[5,4]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm8[2,3],ymm2[6,4],ymm8[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm8[3,0],ymm6[1,0],ymm8[7,4],ymm6[5,4]
+; AVX1-NEXT:    vmovaps %ymm8, %ymm9
+; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm5[2,0],ymm6[2,3],ymm5[6,4],ymm6[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5],ymm0[6,7]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm3[1],xmm1[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vinsertps $35, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm0 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm0 = zero,zero,mem[0],xmm10[3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm15[1,0],xmm4[3,0]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm8[3,0],ymm7[1,0],ymm8[7,4],ymm7[5,4]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm7[2,3],ymm0[6,4],ymm7[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm11[1,0],xmm4[3,0]
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[1,3]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm4[3,0],ymm13[1,0],ymm4[7,4],ymm13[5,4]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm13[2,3],ymm2[6,4],ymm13[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm15[1],xmm14[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm12[0,1,2,3],ymm6[4,5],ymm12[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm10[4,5],ymm12[6,7]
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vblendps $12, (%rsp), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm3 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm15
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm9[2,1],ymm8[2,0],ymm9[6,5],ymm8[6,4]
+; AVX1-NEXT:    vblendps $12, (%rsp), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm12
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm9, %ymm14
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm9[2,1],ymm13[2,0],ymm9[6,5],ymm13[6,4]
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm3[2,0],xmm15[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4],ymm1[5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[0,0],xmm6[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm2[2,0],xmm12[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4],ymm4[5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm1[0,0],xmm4[2,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm7 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
+; AVX1-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm10 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm4[2,1],ymm13[2,0],ymm4[6,5],ymm13[6,4]
-; AVX1-NEXT:    vmovaps %ymm4, %ymm11
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
-; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm4
-; AVX1-NEXT:    vshufps {{.*#+}} xmm12 = xmm10[2,0],xmm4[2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1,2],ymm1[3,4],ymm12[5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm0
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm7[0,0],xmm0[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm5[5,6,7]
+; AVX1-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm9 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm8[2,1],ymm11[2,0],ymm8[6,5],ymm11[6,4]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm6
+; AVX1-NEXT:    vshufps {{.*#+}} xmm10 = xmm9[2,0],xmm6[2,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4],ymm10[5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm7
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm5[0,0],xmm7[2,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1],xmm4[3,1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[3,1],xmm12[3,3]
+; AVX1-NEXT:    vmovaps %ymm14, %ymm0
+; AVX1-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm14[3,1],ymm13[2,1],ymm14[7,5],ymm13[6,5]
+; AVX1-NEXT:    vmovaps %ymm13, %ymm14
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,0,1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4],ymm2[5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
 ; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,1],xmm6[3,1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[3,1],xmm15[3,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm9[3,1],ymm8[2,1],ymm9[7,5],ymm8[6,5]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,0,1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3,4],ymm3[5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm5[0,1],xmm7[3,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm2[5,6,7]
-; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm7[0,1],xmm0[3,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm10[3,1],xmm4[3,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm11[3,1],ymm13[2,1],ymm11[7,5],ymm13[6,5]
-; AVX1-NEXT:    vmovaps %ymm11, %ymm5
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm9[3,1],xmm6[3,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm4 = ymm8[3,1],ymm11[2,1],ymm8[7,5],ymm11[6,5]
+; AVX1-NEXT:    vmovaps %ymm11, %ymm12
+; AVX1-NEXT:    vmovaps %ymm8, %ymm10
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,0,1]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4],ymm3[5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm12
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm12[2,2,3,3]
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm10
-; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm10[0],xmm4[1],xmm10[2,3]
-; AVX1-NEXT:    vmovapd 80(%rdi), %xmm11
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm7 = ymm11[1],ymm8[0],ymm11[2],ymm8[2]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm9[0,1],ymm7[2,0],ymm9[4,5],ymm7[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm2[5,6,7]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm9
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm9[2,2,3,3]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm8
+; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm8[0],xmm4[1],xmm8[2,3]
+; AVX1-NEXT:    vmovapd 80(%rdi), %xmm6
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm7 = ymm6[1],ymm13[0],ymm6[2],ymm13[2]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm0[0,1],ymm7[2,0],ymm0[4,5],ymm7[6,4]
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3,4,5,6,7]
 ; AVX1-NEXT:    vmovdqa 160(%rdi), %xmm7
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],mem[4,5,6,7]
-; AVX1-NEXT:    vmovdqa 176(%rdi), %xmm2
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm1 = xmm2[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vmovdqa 176(%rdi), %xmm0
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm7[0,2],xmm1[2,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm1[5,6,7]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT:    vmovaps 224(%rdi), %xmm1
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm1[2,2,3,3]
-; AVX1-NEXT:    vmovaps 208(%rdi), %xmm3
-; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm3[0],xmm4[1],xmm3[2,3]
-; AVX1-NEXT:    vmovapd 272(%rdi), %xmm0
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm14 = ymm0[1],ymm13[0],ymm0[2],ymm13[2]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm14 = ymm5[0,1],ymm14[2,0],ymm5[4,5],ymm14[6,4]
-; AVX1-NEXT:    vmovaps %ymm5, %ymm13
-; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm4[0,1],ymm14[2,3,4,5,6,7]
-; AVX1-NEXT:    vmovdqa 352(%rdi), %xmm5
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],mem[4,5,6,7]
+; AVX1-NEXT:    vmovaps 208(%rdi), %xmm2
+; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm2[0],xmm4[1],xmm2[2,3]
+; AVX1-NEXT:    vmovapd 272(%rdi), %xmm15
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm13 = ymm15[1],ymm11[0],ymm15[2],ymm11[2]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm13 = ymm10[0,1],ymm13[2,0],ymm10[4,5],ymm13[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm4[0,1],ymm13[2,3,4,5,6,7]
+; AVX1-NEXT:    vmovdqa 352(%rdi), %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],mem[4,5,6,7]
 ; AVX1-NEXT:    vmovdqa 368(%rdi), %xmm4
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm15 = xmm4[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm5[0,2],xmm15[2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm14[0,1,2,3,4],ymm6[5,6,7]
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[12,13,14,15],xmm7[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm7[0,3],xmm2[2,0]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm10[0,1],xmm12[2,3]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm11[3,1],ymm8[1,3],ymm11[7,5],ymm8[5,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm9[1,1],ymm8[2,0],ymm9[5,5],ymm8[6,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm11 = xmm4[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm3[0,2],xmm11[2,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm13[0,1,2,3,4],ymm5[5,6,7]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm7[0,1,2,3,4,5,6,7,8,9,10,11]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm7[0,3],xmm0[2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm7 = xmm8[0,1],xmm9[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[3,1],ymm14[1,3],ymm6[7,5],ymm14[5,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm8[1,1],ymm6[2,0],ymm8[5,5],ymm6[6,4]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[1,3,2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3,4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3,4],ymm2[5,6,7]
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm4 = xmm4[12,13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm5[0,3],xmm4[2,0]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
-; AVX1-NEXT:    vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm0 = ymm0[3,1],mem[1,3],ymm0[7,5],mem[5,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm13[1,1],ymm0[2,0],ymm13[5,5],ymm0[6,4]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3,4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm0[5,6,7]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm4 = xmm4[12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[0,3],xmm4[2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm15[3,1],ymm12[1,3],ymm15[7,5],ymm12[5,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm10[1,1],ymm2[2,0],ymm10[5,5],ymm2[6,4]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, (%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, 32(%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, 32(%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, (%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, 32(%rcx)
-; AVX1-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, (%rcx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, 32(%r8)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, (%r8)
-; AVX1-NEXT:    vmovaps %ymm6, 32(%r9)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, (%r9)
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, (%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, 32(%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, 32(%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, (%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, 32(%rcx)
+; AVX1-NEXT:    vmovups (%rsp), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, (%rcx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, 32(%r8)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, (%r8)
+; AVX1-NEXT:    vmovaps %ymm5, 32(%r9)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm2, (%r9)
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX1-NEXT:    vmovaps %ymm2, (%rax)
-; AVX1-NEXT:    addq $264, %rsp # imm = 0x108
+; AVX1-NEXT:    vmovaps %ymm1, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm0, (%rax)
+; AVX1-NEXT:    addq $328, %rsp # imm = 0x148
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
index e6fafb606c521..352d447876805 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
@@ -72,12 +72,11 @@ define void @load_i64_stride2_vf4(<8 x i64>* %in.vec, <4 x i64>* %out.vec0, <4 x
 ; AVX1-LABEL: load_i64_stride2_vf4:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
-; AVX1-NEXT:    vmovaps %ymm1, (%rsi)
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT:    vmovaps %ymm2, (%rsi)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rdx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -154,20 +153,18 @@ define void @load_i64_stride2_vf8(<16 x i64>* %in.vec, <8 x i64>* %out.vec0, <8
 ; AVX1-LABEL: load_i64_stride2_vf8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX1-NEXT:    vmovaps 64(%rdi), %ymm2
-; AVX1-NEXT:    vmovaps 96(%rdi), %ymm3
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm2[2,3],ymm3[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[0,1],ymm3[0,1]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm2[0],ymm5[0],ymm2[2],ymm5[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm4[1],ymm0[3],ymm4[3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm5[1],ymm2[3],ymm5[3]
-; AVX1-NEXT:    vmovaps %ymm3, 32(%rsi)
-; AVX1-NEXT:    vmovaps %ymm1, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm2, 32(%rdx)
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm1
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm1
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
+; AVX1-NEXT:    vmovaps %ymm5, 32(%rsi)
+; AVX1-NEXT:    vmovaps %ymm3, (%rsi)
+; AVX1-NEXT:    vmovaps %ymm1, 32(%rdx)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rdx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -282,37 +279,33 @@ define void @load_i64_stride2_vf16(<32 x i64>* %in.vec, <16 x i64>* %out.vec0, <
 ; AVX1-LABEL: load_i64_stride2_vf16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX1-NEXT:    vmovaps 64(%rdi), %ymm2
-; AVX1-NEXT:    vmovaps 96(%rdi), %ymm3
-; AVX1-NEXT:    vmovaps 224(%rdi), %ymm4
-; AVX1-NEXT:    vmovaps 192(%rdi), %ymm5
-; AVX1-NEXT:    vmovaps 160(%rdi), %ymm6
-; AVX1-NEXT:    vmovaps 128(%rdi), %ymm7
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[2,3],ymm6[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm7[0,1],ymm6[0,1]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm5[2,3],ymm4[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm5[0,1],ymm4[0,1]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm4[0],ymm9[0],ymm4[2],ymm9[2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm2[2,3],ymm3[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[0,1],ymm3[0,1]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm2[0],ymm10[0],ymm2[2],ymm10[2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm0[2,3],ymm1[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm0[0],ymm11[0],ymm0[2],ymm11[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm9[1],ymm4[3],ymm9[3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm6[1],ymm8[1],ymm6[3],ymm8[3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm10[1],ymm2[3],ymm10[3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm11[1],ymm0[3],ymm11[3]
-; AVX1-NEXT:    vmovaps %ymm5, 96(%rsi)
-; AVX1-NEXT:    vmovaps %ymm1, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm3, 32(%rsi)
-; AVX1-NEXT:    vmovaps %ymm7, 64(%rsi)
-; AVX1-NEXT:    vmovaps %ymm6, 64(%rdx)
-; AVX1-NEXT:    vmovaps %ymm4, 96(%rdx)
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm1
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm2
+; AVX1-NEXT:    vmovaps 192(%rdi), %ymm3
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 160(%rdi), %ymm2, %ymm2
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm1
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm1[0],ymm6[0],ymm1[2],ymm6[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm9 = ymm0[0],ymm8[0],ymm0[2],ymm8[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm3[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 224(%rdi), %ymm3, %ymm3
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm3[0],ymm10[0],ymm3[2],ymm10[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm6[1],ymm1[3],ymm6[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm8[1],ymm0[3],ymm8[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm10[1],ymm3[3],ymm10[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm4[1],ymm2[3],ymm4[3]
+; AVX1-NEXT:    vmovaps %ymm11, 96(%rsi)
+; AVX1-NEXT:    vmovaps %ymm9, (%rsi)
+; AVX1-NEXT:    vmovaps %ymm7, 32(%rsi)
+; AVX1-NEXT:    vmovaps %ymm5, 64(%rsi)
+; AVX1-NEXT:    vmovaps %ymm2, 64(%rdx)
+; AVX1-NEXT:    vmovaps %ymm3, 96(%rdx)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rdx)
-; AVX1-NEXT:    vmovaps %ymm2, 32(%rdx)
+; AVX1-NEXT:    vmovaps %ymm1, 32(%rdx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -538,76 +531,64 @@ define void @load_i64_stride2_vf32(<64 x i64>* %in.vec, <32 x i64>* %out.vec0, <
 ;
 ; AVX1-LABEL: load_i64_stride2_vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovaps (%rdi), %ymm0
-; AVX1-NEXT:    vmovaps 64(%rdi), %ymm1
-; AVX1-NEXT:    vmovaps 96(%rdi), %ymm2
-; AVX1-NEXT:    vmovaps 480(%rdi), %ymm3
-; AVX1-NEXT:    vmovaps 448(%rdi), %ymm4
-; AVX1-NEXT:    vmovaps 288(%rdi), %ymm5
-; AVX1-NEXT:    vmovaps 256(%rdi), %ymm6
-; AVX1-NEXT:    vmovaps 352(%rdi), %ymm7
-; AVX1-NEXT:    vmovaps 320(%rdi), %ymm8
-; AVX1-NEXT:    vmovaps 160(%rdi), %ymm9
-; AVX1-NEXT:    vmovaps 128(%rdi), %ymm10
-; AVX1-NEXT:    vmovaps 224(%rdi), %ymm11
-; AVX1-NEXT:    vmovaps 192(%rdi), %ymm12
-; AVX1-NEXT:    vmovaps 416(%rdi), %ymm13
-; AVX1-NEXT:    vmovaps 384(%rdi), %ymm14
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm14[2,3],ymm13[2,3]
-; AVX1-NEXT:    vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm14[0,1],ymm13[0,1]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm12[2,3],ymm11[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm12[0,1],ymm11[0,1]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm10[2,3],ymm9[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm10[0,1],ymm9[0,1]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm8[2,3],ymm7[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm8[0,1],ymm7[0,1]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm6[2,3],ymm5[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm6[0,1],ymm5[0,1]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm4[2,3],ymm3[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm4[0,1],ymm3[0,1]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],ymm2[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm1[0,1],ymm2[0,1]
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm2
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm2[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[0,1],ymm2[0,1]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm13[0],ymm14[0],ymm13[2],ymm14[2]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm13[1],ymm14[1],ymm13[3],ymm14[3]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm14 = ymm11[0],ymm12[0],ymm11[2],ymm12[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm11 = ymm11[1],ymm12[1],ymm11[3],ymm12[3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm9[0],ymm10[0],ymm9[2],ymm10[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm9 = ymm9[1],ymm10[1],ymm9[3],ymm10[3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm7[1],ymm8[1],ymm7[3],ymm8[3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm8 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm6[1],ymm5[3],ymm6[3]
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm15[0],ymm0[0],ymm15[2],ymm0[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm15[1],ymm0[1],ymm15[3],ymm0[3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm4[1],ymm3[3],ymm4[3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX1-NEXT:    vmovaps %ymm8, 224(%rsi)
-; AVX1-NEXT:    vmovaps %ymm10, 128(%rsi)
-; AVX1-NEXT:    vmovaps %ymm12, 160(%rsi)
-; AVX1-NEXT:    vmovaps %ymm14, 64(%rsi)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, 96(%rsi)
-; AVX1-NEXT:    vmovaps %ymm4, (%rsi)
-; AVX1-NEXT:    vmovaps %ymm15, 32(%rsi)
-; AVX1-NEXT:    vmovaps %ymm6, 192(%rsi)
-; AVX1-NEXT:    vmovaps %ymm13, 192(%rdx)
-; AVX1-NEXT:    vmovaps %ymm5, 224(%rdx)
-; AVX1-NEXT:    vmovaps %ymm7, 128(%rdx)
-; AVX1-NEXT:    vmovaps %ymm9, 160(%rdx)
-; AVX1-NEXT:    vmovaps %ymm11, 64(%rdx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm1, 96(%rdx)
-; AVX1-NEXT:    vmovaps %ymm0, (%rdx)
-; AVX1-NEXT:    vmovaps %ymm3, 32(%rdx)
+; AVX1-NEXT:    vmovaps 448(%rdi), %ymm0
+; AVX1-NEXT:    vmovaps 256(%rdi), %ymm1
+; AVX1-NEXT:    vmovaps 320(%rdi), %ymm3
+; AVX1-NEXT:    vmovaps (%rdi), %ymm4
+; AVX1-NEXT:    vmovaps 64(%rdi), %ymm2
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm5
+; AVX1-NEXT:    vmovaps 192(%rdi), %ymm6
+; AVX1-NEXT:    vmovaps 384(%rdi), %ymm7
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm2[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm2, %ymm9
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm9[0],ymm8[0],ymm9[2],ymm8[2]
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm4[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 32(%rdi), %ymm4, %ymm4
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm4[0],ymm10[0],ymm4[2],ymm10[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm6[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 224(%rdi), %ymm6, %ymm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm13 = ymm6[0],ymm12[0],ymm6[2],ymm12[2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm5[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 160(%rdi), %ymm5, %ymm5
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm5[0],ymm14[0],ymm5[2],ymm14[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm9[1],ymm8[1],ymm9[3],ymm8[3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm3[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 352(%rdi), %ymm3, %ymm3
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm10[1],ymm4[3],ymm10[3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm3[0],ymm9[0],ymm3[2],ymm9[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm6[1],ymm12[1],ymm6[3],ymm12[3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 288(%rdi), %ymm1, %ymm1
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm14[1],ymm5[3],ymm14[3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm14 = ymm1[0],ymm12[0],ymm1[2],ymm12[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm9[1],ymm3[3],ymm9[3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 480(%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm1[1],ymm12[1],ymm1[3],ymm12[3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm9[0],ymm0[2],ymm9[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm0[1],ymm9[1],ymm0[3],ymm9[3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm7[2,3],mem[2,3]
+; AVX1-NEXT:    vinsertf128 $1, 416(%rdi), %ymm7, %ymm7
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm7[1],ymm9[1],ymm7[3],ymm9[3]
+; AVX1-NEXT:    vmovaps %ymm12, 224(%rsi)
+; AVX1-NEXT:    vmovaps %ymm14, 128(%rsi)
+; AVX1-NEXT:    vmovaps %ymm10, 160(%rsi)
+; AVX1-NEXT:    vmovaps %ymm15, 64(%rsi)
+; AVX1-NEXT:    vmovaps %ymm13, 96(%rsi)
+; AVX1-NEXT:    vmovaps %ymm11, (%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm9, 32(%rsi)
+; AVX1-NEXT:    vmovaps %ymm0, 192(%rsi)
+; AVX1-NEXT:    vmovaps %ymm7, 192(%rdx)
+; AVX1-NEXT:    vmovaps %ymm1, 224(%rdx)
+; AVX1-NEXT:    vmovaps %ymm2, 128(%rdx)
+; AVX1-NEXT:    vmovaps %ymm3, 160(%rdx)
+; AVX1-NEXT:    vmovaps %ymm5, 64(%rdx)
+; AVX1-NEXT:    vmovaps %ymm6, 96(%rdx)
+; AVX1-NEXT:    vmovaps %ymm4, (%rdx)
+; AVX1-NEXT:    vmovaps %ymm8, 32(%rdx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
index 238f602986e87..02db24660c45b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx  | FileCheck %s --check-prefixes=AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
@@ -124,26 +124,45 @@ define void @load_i64_stride4_vf4(<16 x i64>* %in.vec, <4 x i64>* %out.vec0, <4
 ; SSE-NEXT:    movaps %xmm3, (%r8)
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: load_i64_stride4_vf4:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovaps (%rdi), %ymm0
-; AVX-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX-NEXT:    vmovaps 64(%rdi), %ymm2
-; AVX-NEXT:    vmovaps 96(%rdi), %ymm3
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT:    vmovaps %ymm2, (%rsi)
-; AVX-NEXT:    vmovaps %ymm4, (%rdx)
-; AVX-NEXT:    vmovaps %ymm3, (%rcx)
-; AVX-NEXT:    vmovaps %ymm0, (%r8)
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1-LABEL: load_i64_stride4_vf4:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovaps (%rdi), %ymm0
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT:    vmovaps %ymm4, (%rsi)
+; AVX1-NEXT:    vmovaps %ymm2, (%rdx)
+; AVX1-NEXT:    vmovaps %ymm5, (%rcx)
+; AVX1-NEXT:    vmovaps %ymm0, (%r8)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: load_i64_stride4_vf4:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-NEXT:    vmovaps 32(%rdi), %ymm1
+; AVX2-NEXT:    vmovaps 64(%rdi), %ymm2
+; AVX2-NEXT:    vmovaps 96(%rdi), %ymm3
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT:    vmovaps %ymm2, (%rsi)
+; AVX2-NEXT:    vmovaps %ymm4, (%rdx)
+; AVX2-NEXT:    vmovaps %ymm3, (%rcx)
+; AVX2-NEXT:    vmovaps %ymm0, (%r8)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_i64_stride4_vf4:
 ; AVX512:       # %bb.0:

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
index 15eaad9b0527f..77c17fb20aa7e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx  | FileCheck %s --check-prefixes=AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
@@ -115,26 +115,45 @@ define void @store_i64_stride4_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr
 ; SSE-NEXT:    movaps %xmm3, 16(%r8)
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: store_i64_stride4_vf4:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovaps (%rdi), %ymm0
-; AVX-NEXT:    vmovaps (%rsi), %ymm1
-; AVX-NEXT:    vmovaps (%rdx), %ymm2
-; AVX-NEXT:    vmovaps (%rcx), %ymm3
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT:    vmovaps %ymm0, 96(%r8)
-; AVX-NEXT:    vmovaps %ymm3, 64(%r8)
-; AVX-NEXT:    vmovaps %ymm4, 32(%r8)
-; AVX-NEXT:    vmovaps %ymm2, (%r8)
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1-LABEL: store_i64_stride4_vf4:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovaps (%rdi), %ymm0
+; AVX1-NEXT:    vmovaps (%rsi), %ymm1
+; AVX1-NEXT:    vinsertf128 $1, (%rdx), %ymm0, %ymm2
+; AVX1-NEXT:    vinsertf128 $1, (%rcx), %ymm1, %ymm3
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT:    vmovaps %ymm4, (%r8)
+; AVX1-NEXT:    vmovaps %ymm0, 96(%r8)
+; AVX1-NEXT:    vmovaps %ymm5, 64(%r8)
+; AVX1-NEXT:    vmovaps %ymm2, 32(%r8)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: store_i64_stride4_vf4:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-NEXT:    vmovaps (%rsi), %ymm1
+; AVX2-NEXT:    vmovaps (%rdx), %ymm2
+; AVX2-NEXT:    vmovaps (%rcx), %ymm3
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT:    vmovaps %ymm0, 96(%r8)
+; AVX2-NEXT:    vmovaps %ymm3, 64(%r8)
+; AVX2-NEXT:    vmovaps %ymm4, 32(%r8)
+; AVX2-NEXT:    vmovaps %ymm2, (%r8)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: store_i64_stride4_vf4:
 ; AVX512:       # %bb.0:

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index 1a1153d0e8864..8012121c3c6a5 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -447,7 +447,7 @@ define void @PR48908(<4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x
 ; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],ymm2[0,1]
 ; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm5
 ; X86-AVX1-NEXT:    vshufpd {{.*#+}} ymm4 = ymm5[1],ymm4[0],ymm5[2],ymm4[3]
-; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm0[0,1],ymm2[0,1]
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm5
 ; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,0,1]
 ; X86-AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2],ymm3[3]
 ; X86-AVX1-NEXT:    vmovapd %ymm3, (%edx)
@@ -520,7 +520,7 @@ define void @PR48908(<4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x
 ; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],ymm2[0,1]
 ; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm5
 ; X64-AVX1-NEXT:    vshufpd {{.*#+}} ymm4 = ymm5[1],ymm4[0],ymm5[2],ymm4[3]
-; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm0[0,1],ymm2[0,1]
+; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm5
 ; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,0,1]
 ; X64-AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2],ymm3[3]
 ; X64-AVX1-NEXT:    vmovapd %ymm3, (%rdi)

diff  --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index 258b54203438b..127972d9ce710 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -4,22 +4,37 @@
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx512f -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2OR512,AVX512
 
 define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
-; AVX-LABEL: load_factorf64_4:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovupd (%rdi), %ymm0
-; AVX-NEXT:    vmovupd 32(%rdi), %ymm1
-; AVX-NEXT:    vmovupd 64(%rdi), %ymm2
-; AVX-NEXT:    vmovupd 96(%rdi), %ymm3
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
-; AVX-NEXT:    vhaddpd %ymm5, %ymm4, %ymm4
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT:    vaddpd %ymm2, %ymm4, %ymm2
-; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: load_factorf64_4:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovupd (%rdi), %ymm0
+; AVX1-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
+; AVX1-NEXT:    vhaddpd %ymm3, %ymm2, %ymm2
+; AVX1-NEXT:    vperm2f128 $49, 64(%rdi), %ymm0, %ymm0 # ymm0 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vperm2f128 $49, 96(%rdi), %ymm1, %ymm1 # ymm1 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vaddpd %ymm3, %ymm2, %ymm2
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2OR512-LABEL: load_factorf64_4:
+; AVX2OR512:       # %bb.0:
+; AVX2OR512-NEXT:    vmovupd (%rdi), %ymm0
+; AVX2OR512-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX2OR512-NEXT:    vmovupd 64(%rdi), %ymm2
+; AVX2OR512-NEXT:    vmovupd 96(%rdi), %ymm3
+; AVX2OR512-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX2OR512-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
+; AVX2OR512-NEXT:    vhaddpd %ymm5, %ymm4, %ymm4
+; AVX2OR512-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX2OR512-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX2OR512-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2OR512-NEXT:    vaddpd %ymm2, %ymm4, %ymm2
+; AVX2OR512-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2OR512-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
+; AVX2OR512-NEXT:    retq
   %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
   %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %strided.v1 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
@@ -32,20 +47,33 @@ define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
 }
 
 define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
-; AVX-LABEL: load_factorf64_2:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovupd (%rdi), %ymm0
-; AVX-NEXT:    vmovupd 32(%rdi), %ymm1
-; AVX-NEXT:    vmovupd 64(%rdi), %ymm2
-; AVX-NEXT:    vmovupd 96(%rdi), %ymm3
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT:    vmulpd %ymm0, %ymm4, %ymm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: load_factorf64_2:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovupd (%rdi), %ymm0
+; AVX1-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX1-NEXT:    vperm2f128 $49, 64(%rdi), %ymm0, %ymm0 # ymm0 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vperm2f128 $49, 96(%rdi), %ymm1, %ymm1 # ymm1 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT:    vmulpd %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2OR512-LABEL: load_factorf64_2:
+; AVX2OR512:       # %bb.0:
+; AVX2OR512-NEXT:    vmovupd (%rdi), %ymm0
+; AVX2OR512-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX2OR512-NEXT:    vmovupd 64(%rdi), %ymm2
+; AVX2OR512-NEXT:    vmovupd 96(%rdi), %ymm3
+; AVX2OR512-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX2OR512-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
+; AVX2OR512-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX2OR512-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX2OR512-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX2OR512-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2OR512-NEXT:    vmulpd %ymm0, %ymm4, %ymm0
+; AVX2OR512-NEXT:    retq
   %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
   %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
@@ -54,15 +82,25 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
 }
 
 define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
-; AVX-LABEL: load_factorf64_1:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovupd (%rdi), %ymm0
-; AVX-NEXT:    vmovupd 32(%rdi), %ymm1
-; AVX-NEXT:    vperm2f128 $32, 64(%rdi), %ymm0, %ymm0 # ymm0 = ymm0[0,1],mem[0,1]
-; AVX-NEXT:    vperm2f128 $32, 96(%rdi), %ymm1, %ymm1 # ymm1 = ymm1[0,1],mem[0,1]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT:    vmulpd %ymm0, %ymm0, %ymm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: load_factorf64_1:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovups (%rdi), %ymm0
+; AVX1-NEXT:    vmovups 32(%rdi), %ymm1
+; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm1
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vmulpd %ymm0, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2OR512-LABEL: load_factorf64_1:
+; AVX2OR512:       # %bb.0:
+; AVX2OR512-NEXT:    vmovupd (%rdi), %ymm0
+; AVX2OR512-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX2OR512-NEXT:    vperm2f128 $32, 64(%rdi), %ymm0, %ymm0 # ymm0 = ymm0[0,1],mem[0,1]
+; AVX2OR512-NEXT:    vperm2f128 $32, 96(%rdi), %ymm1, %ymm1 # ymm1 = ymm1[0,1],mem[0,1]
+; AVX2OR512-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2OR512-NEXT:    vmulpd %ymm0, %ymm0, %ymm0
+; AVX2OR512-NEXT:    retq
   %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
   %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
@@ -75,26 +113,24 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovups (%rdi), %ymm0
 ; AVX1-NEXT:    vmovups 32(%rdi), %ymm1
-; AVX1-NEXT:    vmovups 64(%rdi), %ymm2
-; AVX1-NEXT:    vmovups 96(%rdi), %ymm3
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
+; AVX1-NEXT:    vperm2f128 $49, 64(%rdi), %ymm0, %ymm0 # ymm0 = ymm0[2,3],mem[2,3]
+; AVX1-NEXT:    vperm2f128 $49, 96(%rdi), %ymm1, %ymm1 # ymm1 = ymm1[2,3],mem[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
 ; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
-; AVX1-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT:    vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm5
+; AVX1-NEXT:    vpaddq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpaddq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
 ; AVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;