[llvm] 5f7ac15 - Revert "[DAG]Introduce llvm::processShuffleMasks and use it for shuffles in DAG Type Legalizer."

Alexey Bataev via llvm-commits <llvm-commits@lists.llvm.org>
Wed Apr 20 06:37:35 PDT 2022


Author: Alexey Bataev
Date: 2022-04-20T06:35:55-07:00
New Revision: 5f7ac15912dd614ff02bd74225f3a7f4f0f485c3

URL: https://github.com/llvm/llvm-project/commit/5f7ac15912dd614ff02bd74225f3a7f4f0f485c3
DIFF: https://github.com/llvm/llvm-project/commit/5f7ac15912dd614ff02bd74225f3a7f4f0f485c3.diff

LOG: Revert "[DAG]Introduce llvm::processShuffleMasks and use it for shuffles in DAG Type Legalizer."

This reverts commit 2f49163b3365e5dc046b03e422a048dd45aee3f0 to fix
a buildbot failure. Reported in https://lab.llvm.org/buildbot#builders/105/builds/24284

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/VectorUtils.h
    llvm/lib/Analysis/VectorUtils.cpp
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
    llvm/test/CodeGen/AArch64/insert-extend.ll
    llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
    llvm/test/CodeGen/AArch64/shuffles.ll
    llvm/test/CodeGen/ARM/fp16-insert-extract.ll
    llvm/test/CodeGen/PowerPC/pr27078.ll
    llvm/test/CodeGen/Thumb2/mve-shuffle.ll
    llvm/test/CodeGen/Thumb2/mve-vld3.ll
    llvm/test/CodeGen/Thumb2/mve-vst3.ll
    llvm/test/CodeGen/Thumb2/mve-vst4.ll
    llvm/test/CodeGen/X86/haddsub-4.ll
    llvm/test/CodeGen/X86/insertelement-duplicates.ll
    llvm/test/CodeGen/X86/oddshuffles.ll
    llvm/test/CodeGen/X86/pr34592.ll
    llvm/test/CodeGen/X86/pr44976.ll
    llvm/test/CodeGen/X86/splat-for-size.ll
    llvm/test/CodeGen/X86/split-extend-vector-inreg.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
    llvm/test/CodeGen/X86/vector-shuffle-combining.ll
    llvm/test/CodeGen/X86/x86-interleaved-access.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index f90c68a88578c..751c88a4ecbb6 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -398,24 +398,6 @@ void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
 bool widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                           SmallVectorImpl<int> &ScaledMask);
 
-/// Splits and processes shuffle mask depending on the number of input and
-/// output registers. The function does 2 main things: 1) splits the
-/// source/destination vectors into real registers; 2) do the mask analysis to
-/// identify which real registers are permuted. Then the function processes
-/// resulting registers mask using provided action items. If no input register
-/// is defined, \p NoInputAction action is used. If only 1 input register is
-/// used, \p SingleInputAction is used, otherwise \p ManyInputsAction is used to
-/// process > 2 input registers and masks.
-/// \param Mask Original shuffle mask.
-/// \param NumOfSrcRegs Number of source registers.
-/// \param NumOfDestRegs Number of destination registers.
-/// \param NumOfUsedRegs Number of actually used destination registers.
-void processShuffleMasks(
-    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
-    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
-    function_ref<void(ArrayRef<int>, unsigned)> SingleInputAction,
-    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction);
-
 /// Compute a map of integer instructions to their minimum legal type
 /// size.
 ///
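
For context on the interface being reverted: processShuffleMasks is callback-driven. Below is a minimal sketch of how a caller drives the three actions, assuming the declaration removed above is still present; the lambda bodies and the EmitUndef/EmitOneSrcShuffle/EmitTwoSrcShuffle names are hypothetical stand-ins for whatever the caller actually emits per destination register.

  // Illustrative sketch only; not part of this commit.
  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/Analysis/VectorUtils.h"

  void emitPerRegisterShuffles(llvm::ArrayRef<int> Mask, unsigned NumSrcRegs,
                               unsigned NumDstRegs, unsigned NumUsedRegs) {
    llvm::processShuffleMasks(
        Mask, NumSrcRegs, NumDstRegs, NumUsedRegs,
        // No input register feeds this destination register: result is undef.
        []() { /* EmitUndef(); */ },
        // Exactly one source register is used: a single-source permute.
        [](llvm::ArrayRef<int> RegMask, unsigned SrcReg) {
          /* EmitOneSrcShuffle(SrcReg, RegMask); */
        },
        // Two source registers at a time (masks touching more than two inputs
        // are merged pairwise by the function): a two-source shuffle.
        [](llvm::ArrayRef<int> RegMask, unsigned SrcReg1, unsigned SrcReg2) {
          /* EmitTwoSrcShuffle(SrcReg1, SrcReg2, RegMask); */
        });
  }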

diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index ac0a3571e049f..655c248907f61 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -496,116 +496,6 @@ bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
   return true;
 }
 
-void llvm::processShuffleMasks(
-    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
-    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
-    function_ref<void(ArrayRef<int>, unsigned)> SingleInputAction,
-    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
-  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
-  // Try to perform better estimation of the permutation.
-  // 1. Split the source/destination vectors into real registers.
-  // 2. Do the mask analysis to identify which real registers are
-  // permuted.
-  int Sz = Mask.size();
-  unsigned SzDest = Sz / NumOfDestRegs;
-  unsigned SzSrc = Sz / NumOfSrcRegs;
-  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
-    auto &RegMasks = Res[I];
-    RegMasks.assign(NumOfSrcRegs, {});
-    // Check that the values in dest registers are in the one src
-    // register.
-    for (unsigned K = 0; K < SzDest; ++K) {
-      int Idx = I * SzDest + K;
-      if (Idx == Sz)
-        break;
-      if (Mask[Idx] >= Sz || Mask[Idx] == UndefMaskElem)
-        continue;
-      int SrcRegIdx = Mask[Idx] / SzSrc;
-      // Add a cost of PermuteTwoSrc for each new source register permute,
-      // if we have more than one source registers.
-      if (RegMasks[SrcRegIdx].empty())
-        RegMasks[SrcRegIdx].assign(SzDest, UndefMaskElem);
-      RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
-    }
-  }
-  // Process split mask.
-  for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
-    auto &Dest = Res[I];
-    int NumSrcRegs =
-        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
-    switch (NumSrcRegs) {
-    case 0:
-      // No input vectors were used!
-      NoInputAction();
-      break;
-    case 1: {
-      // Find the only mask with at least single undef mask elem.
-      auto *It =
-          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
-      unsigned SrcReg = std::distance(Dest.begin(), It);
-      SingleInputAction(*It, SrcReg);
-      break;
-    }
-    default: {
-      // The first mask is a permutation of a single register. Since we have >2
-      // input registers to shuffle, we merge the masks for 2 first registers
-      // and generate a shuffle of 2 registers rather than the reordering of the
-      // first register and then shuffle with the second register. Next,
-      // generate the shuffles of the resulting register + the remaining
-      // registers from the list.
-      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
-                               ArrayRef<int> SecondMask) {
-        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
-          if (SecondMask[Idx] != UndefMaskElem) {
-            assert(FirstMask[Idx] == UndefMaskElem &&
-                   "Expected undefined mask element.");
-            FirstMask[Idx] = SecondMask[Idx] + VF;
-          }
-        }
-      };
-      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
-        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
-          if (Mask[Idx] != UndefMaskElem)
-            Mask[Idx] = Idx;
-        }
-      };
-      int SecondIdx;
-      do {
-        int FirstIdx = -1;
-        SecondIdx = -1;
-        MutableArrayRef<int> FirstMask, SecondMask;
-        for (unsigned I = 0; I < NumOfDestRegs; ++I) {
-          SmallVectorImpl<int> &RegMask = Dest[I];
-          if (RegMask.empty())
-            continue;
-
-          if (FirstIdx == SecondIdx) {
-            FirstIdx = I;
-            FirstMask = RegMask;
-            continue;
-          }
-          SecondIdx = I;
-          SecondMask = RegMask;
-          CombineMasks(FirstMask, SecondMask);
-          ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
-          NormalizeMask(FirstMask);
-          RegMask.clear();
-          SecondMask = FirstMask;
-          SecondIdx = FirstIdx;
-        }
-        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
-          CombineMasks(SecondMask, FirstMask);
-          ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
-          Dest[FirstIdx].clear();
-          NormalizeMask(SecondMask);
-        }
-      } while (SecondIdx >= 0);
-      break;
-    }
-    }
-  }
-}
-
 MapVector<Instruction *, uint64_t>
 llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                                const TargetTransformInfo *TTI) {
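
The default case in the removed implementation above (more than one source register feeding a destination register) merges the per-register masks pairwise with CombineMasks and then NormalizeMask. Here is a standalone sketch of that arithmetic, using plain std::vector and hypothetical lower-case names instead of the LLVM ADTs.

  #include <cassert>
  #include <vector>

  static constexpr int UndefElem = -1;

  // Fold the defined elements of the second per-register mask into the first,
  // offset by the vector factor, so the pair becomes one two-source shuffle.
  static void combineMasks(std::vector<int> &First,
                           const std::vector<int> &Second) {
    const int VF = static_cast<int>(First.size());
    for (int I = 0; I < VF; ++I) {
      if (Second[I] == UndefElem)
        continue;
      assert(First[I] == UndefElem && "Expected undefined mask element.");
      First[I] = Second[I] + VF; // Element comes from the second operand.
    }
  }

  // After the shuffle is emitted, its result acts as the first operand of the
  // next merge, so defined elements are rewritten to the identity.
  static void normalizeMask(std::vector<int> &Mask) {
    for (int I = 0, VF = static_cast<int>(Mask.size()); I < VF; ++I)
      if (Mask[I] != UndefElem)
        Mask[I] = I;
  }

  // Example with VF = 4: {0, -1, 3, -1} (register A) and {-1, 1, -1, 2}
  // (register B) combine to {0, 5, 3, 6}, one shuffle of A and B, which is
  // then normalized to {0, 1, 2, 3} before the next register is merged in.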

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4394418a052c5..ee0c342352031 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -20413,39 +20413,18 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
       int Left = 2 * In;
       int Right = 2 * In + 1;
       SmallVector<int, 8> Mask(NumElems, -1);
-      SDValue L = Shuffles[Left];
-      ArrayRef<int> LMask;
-      bool IsLeftShuffle = L.getOpcode() == ISD::VECTOR_SHUFFLE &&
-                           L.use_empty() && L.getOperand(1).isUndef() &&
-                           L.getOperand(0).getValueType() == L.getValueType();
-      if (IsLeftShuffle) {
-        LMask = cast<ShuffleVectorSDNode>(L.getNode())->getMask();
-        L = L.getOperand(0);
-      }
-      SDValue R = Shuffles[Right];
-      ArrayRef<int> RMask;
-      bool IsRightShuffle = R.getOpcode() == ISD::VECTOR_SHUFFLE &&
-                            R.use_empty() && R.getOperand(1).isUndef() &&
-                            R.getOperand(0).getValueType() == R.getValueType();
-      if (IsRightShuffle) {
-        RMask = cast<ShuffleVectorSDNode>(R.getNode())->getMask();
-        R = R.getOperand(0);
-      }
-      for (unsigned I = 0; I != NumElems; ++I) {
-        if (VectorMask[I] == Left) {
-          Mask[I] = I;
-          if (IsLeftShuffle)
-            Mask[I] = LMask[I];
-          VectorMask[I] = In;
-        } else if (VectorMask[I] == Right) {
-          Mask[I] = I + NumElems;
-          if (IsRightShuffle)
-            Mask[I] = RMask[I] + NumElems;
-          VectorMask[I] = In;
+      for (unsigned i = 0; i != NumElems; ++i) {
+        if (VectorMask[i] == Left) {
+          Mask[i] = i;
+          VectorMask[i] = In;
+        } else if (VectorMask[i] == Right) {
+          Mask[i] = i + NumElems;
+          VectorMask[i] = In;
         }
       }
 
-      Shuffles[In] = DAG.getVectorShuffle(VT, DL, L, R, Mask);
+      Shuffles[In] =
+          DAG.getVectorShuffle(VT, DL, Shuffles[Left], Shuffles[Right], Mask);
     }
   }
   return Shuffles[0];

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 82f4d2a08078c..ef1be4e53611f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -20,9 +20,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "LegalizeTypes.h"
-#include "llvm/ADT/SmallBitVector.h"
 #include "llvm/Analysis/MemoryLocation.h"
-#include "llvm/Analysis/VectorUtils.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/TypeSize.h"
@@ -2168,349 +2166,108 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
                                                   SDValue &Lo, SDValue &Hi) {
   // The low and high parts of the original input give four input vectors.
   SDValue Inputs[4];
-  SDLoc DL(N);
+  SDLoc dl(N);
   GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
   GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
   EVT NewVT = Inputs[0].getValueType();
   unsigned NewElts = NewVT.getVectorNumElements();
 
-  auto &&IsConstant = [](const SDValue &N) {
-    APInt SplatValue;
-    return N.getResNo() == 0 &&
-           (ISD::isConstantSplatVector(N.getNode(), SplatValue) ||
-            ISD::isBuildVectorOfConstantSDNodes(N.getNode()));
-  };
-  auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &DL](SDValue &Input1,
-                                                         SDValue &Input2,
-                                                         ArrayRef<int> Mask) {
-    assert(Input1->getOpcode() == ISD::BUILD_VECTOR &&
-           Input2->getOpcode() == ISD::BUILD_VECTOR &&
-           "Expected build vector node.");
-    SmallVector<SDValue> Ops(NewElts,
-                             DAG.getUNDEF(Input1.getOperand(0).getValueType()));
-    for (unsigned I = 0; I < NewElts; ++I) {
-      if (Mask[I] == UndefMaskElem)
-        continue;
-      unsigned Idx = Mask[I];
-      if (Idx >= NewElts)
-        Ops[I] = Input2.getOperand(Idx - NewElts);
-      else
-        Ops[I] = Input1.getOperand(Idx);
-    }
-    return DAG.getBuildVector(NewVT, DL, Ops);
-  };
-
   // If Lo or Hi uses elements from at most two of the four input vectors, then
   // express it as a vector shuffle of those two inputs.  Otherwise extract the
   // input elements by hand and construct the Lo/Hi output using a BUILD_VECTOR.
-  SmallVector<int> OrigMask(N->getMask().begin(), N->getMask().end());
-  // Try to pack incoming shuffles/inputs.
-  auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT, this, NewElts,
-                                         &DL](SmallVectorImpl<int> &Mask) {
-    // Check if all inputs are shuffles of the same operands or non-shuffles.
-    MapVector<std::pair<SDValue, SDValue>, SmallVector<unsigned>> ShufflesIdxs;
-    for (unsigned Idx = 0; Idx < array_lengthof(Inputs); ++Idx) {
-      SDValue Input = Inputs[Idx];
-      auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.getNode());
-      if (!Shuffle ||
-          Input.getOperand(0).getValueType() != Input.getValueType())
-        continue;
-      ShufflesIdxs[std::make_pair(Input.getOperand(0), Input.getOperand(1))]
-          .push_back(Idx);
-      ShufflesIdxs[std::make_pair(Input.getOperand(1), Input.getOperand(0))]
-          .push_back(Idx);
-    }
-    for (auto &P : ShufflesIdxs) {
-      if (P.second.size() < 2)
-        continue;
-      // Use shuffles operands instead of shuffles themselves.
-      // 1. Adjust mask.
-      for (int &Idx : Mask) {
-        if (Idx == UndefMaskElem)
-          continue;
-        unsigned SrcRegIdx = Idx / NewElts;
-        if (Inputs[SrcRegIdx].isUndef()) {
-          Idx = UndefMaskElem;
-          continue;
-        }
-        auto *Shuffle =
-            dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
-        if (!Shuffle || !is_contained(P.second, SrcRegIdx))
-          continue;
-        int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
-        if (MaskElt == UndefMaskElem) {
-          Idx = UndefMaskElem;
-          continue;
-        }
-        Idx = MaskElt % NewElts +
-              P.second[Shuffle->getOperand(MaskElt / NewElts) == P.first.first
-                           ? 0
-                           : 1] *
-                  NewElts;
-      }
-      // 2. Update inputs.
-      Inputs[P.second[0]] = P.first.first;
-      Inputs[P.second[1]] = P.first.second;
-      // Clear the pair data.
-      P.second.clear();
-      ShufflesIdxs[std::make_pair(P.first.second, P.first.first)].clear();
-    }
-    // Check if any concat_vectors can be simplified.
-    SmallBitVector UsedSubVector(2 * array_lengthof(Inputs));
-    for (int &Idx : Mask) {
-      if (Idx == UndefMaskElem)
-        continue;
-      unsigned SrcRegIdx = Idx / NewElts;
-      if (Inputs[SrcRegIdx].isUndef()) {
-        Idx = UndefMaskElem;
+  SmallVector<int, 16> Ops;
+  for (unsigned High = 0; High < 2; ++High) {
+    SDValue &Output = High ? Hi : Lo;
+
+    // Build a shuffle mask for the output, discovering on the fly which
+    // input vectors to use as shuffle operands (recorded in InputUsed).
+    // If building a suitable shuffle vector proves too hard, then bail
+    // out with useBuildVector set.
+    unsigned InputUsed[2] = { -1U, -1U }; // Not yet discovered.
+    unsigned FirstMaskIdx = High * NewElts;
+    bool useBuildVector = false;
+    for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
+      // The mask element.  This indexes into the input.
+      int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
+
+      // The input vector this mask element indexes into.
+      unsigned Input = (unsigned)Idx / NewElts;
+
+      if (Input >= array_lengthof(Inputs)) {
+        // The mask element does not index into any input vector.
+        Ops.push_back(-1);
         continue;
       }
-      TargetLowering::LegalizeTypeAction TypeAction =
-          getTypeAction(Inputs[SrcRegIdx].getValueType());
-      if (Inputs[SrcRegIdx].getOpcode() == ISD::CONCAT_VECTORS &&
-          Inputs[SrcRegIdx].getNumOperands() == 2 &&
-          !Inputs[SrcRegIdx].getOperand(1).isUndef() &&
-          (TypeAction == TargetLowering::TypeLegal ||
-           TypeAction == TargetLowering::TypeWidenVector))
-        UsedSubVector.set(2 * SrcRegIdx + (Idx % NewElts) / (NewElts / 2));
-    }
-    if (UsedSubVector.count() > 1) {
-      SmallVector<SmallVector<std::pair<unsigned, int>, 2>> Pairs;
-      for (unsigned I = 0; I < array_lengthof(Inputs); ++I) {
-        if (UsedSubVector.test(2 * I) == UsedSubVector.test(2 * I + 1))
-          continue;
-        if (Pairs.empty() || Pairs.back().size() == 2)
-          Pairs.emplace_back();
-        if (UsedSubVector.test(2 * I)) {
-          Pairs.back().emplace_back(I, 0);
-        } else {
-          assert(UsedSubVector.test(2 * I + 1) &&
-                 "Expected to be used one of the subvectors.");
-          Pairs.back().emplace_back(I, 1);
-        }
-      }
-      if (!Pairs.empty() && Pairs.front().size() > 1) {
-        // Adjust mask.
-        for (int &Idx : Mask) {
-          if (Idx == UndefMaskElem)
-            continue;
-          unsigned SrcRegIdx = Idx / NewElts;
-          auto *It = find_if(
-              Pairs, [SrcRegIdx](ArrayRef<std::pair<unsigned, int>> Idxs) {
-                return Idxs.front().first == SrcRegIdx ||
-                       Idxs.back().first == SrcRegIdx;
-              });
-          if (It == Pairs.end())
-            continue;
-          Idx = It->front().first * NewElts + (Idx % NewElts) % (NewElts / 2) +
-                (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
-        }
-        // Adjust inputs.
-        for (ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
-          Inputs[Idxs.front().first] = DAG.getNode(
-              ISD::CONCAT_VECTORS, DL,
-              Inputs[Idxs.front().first].getValueType(),
-              Inputs[Idxs.front().first].getOperand(Idxs.front().second),
-              Inputs[Idxs.back().first].getOperand(Idxs.back().second));
+
+      // Turn the index into an offset from the start of the input vector.
+      Idx -= Input * NewElts;
+
+      // Find or create a shuffle vector operand to hold this input.
+      unsigned OpNo;
+      for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
+        if (InputUsed[OpNo] == Input) {
+          // This input vector is already an operand.
+          break;
+        } else if (InputUsed[OpNo] == -1U) {
+          // Create a new operand for this input vector.
+          InputUsed[OpNo] = Input;
+          break;
         }
       }
-    }
-    bool Changed;
-    do {
-      // Try to remove extra shuffles (except broadcasts) and shuffles with the
-      // reused operands.
-      Changed = false;
-      for (unsigned I = 0; I < array_lengthof(Inputs); ++I) {
-        auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[I].getNode());
-        if (!Shuffle)
-          continue;
-        if (Shuffle->getOperand(0).getValueType() != NewVT)
-          continue;
-        int Op = -1;
-        if (!Inputs[I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
-            !Shuffle->isSplat()) {
-          Op = 0;
-        } else if (!Inputs[I].hasOneUse() &&
-                   !Shuffle->getOperand(1).isUndef()) {
-          // Find the only used operand, if possible.
-          for (int &Idx : Mask) {
-            if (Idx == UndefMaskElem)
-              continue;
-            unsigned SrcRegIdx = Idx / NewElts;
-            if (SrcRegIdx != I)
-              continue;
-            int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
-            if (MaskElt == UndefMaskElem) {
-              Idx = UndefMaskElem;
-              continue;
-            }
-            int OpIdx = MaskElt / NewElts;
-            if (Op == -1) {
-              Op = OpIdx;
-              continue;
-            }
-            if (Op != OpIdx) {
-              Op = -1;
-              break;
-            }
-          }
-        }
-        if (Op < 0) {
-          // Try to check if one of the shuffle operands is used already.
-          for (int OpIdx = 0; OpIdx < 2; ++OpIdx) {
-            if (Shuffle->getOperand(OpIdx).isUndef())
-              continue;
-            auto *It = find(Inputs, Shuffle->getOperand(OpIdx));
-            if (It == std::end(Inputs))
-              continue;
-            int FoundOp = std::distance(std::begin(Inputs), It);
-            // Found that operand is used already.
-            // 1. Fix the mask for the reused operand.
-            for (int &Idx : Mask) {
-              if (Idx == UndefMaskElem)
-                continue;
-              unsigned SrcRegIdx = Idx / NewElts;
-              if (SrcRegIdx != I)
-                continue;
-              int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
-              if (MaskElt == UndefMaskElem) {
-                Idx = UndefMaskElem;
-                continue;
-              }
-              int MaskIdx = MaskElt / NewElts;
-              if (OpIdx == MaskIdx)
-                Idx = MaskElt % NewElts + FoundOp * NewElts;
-            }
-            // 2. Set Op to the unused OpIdx.
-            Op = (OpIdx + 1) % 2;
-            break;
-          }
-        }
-        if (Op >= 0) {
-          Changed = true;
-          Inputs[I] = Shuffle->getOperand(Op);
-          // Adjust mask.
-          for (int &Idx : Mask) {
-            if (Idx == UndefMaskElem)
-              continue;
-            unsigned SrcRegIdx = Idx / NewElts;
-            if (SrcRegIdx != I)
-              continue;
-            int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
-            int OpIdx = MaskElt / NewElts;
-            if (OpIdx != Op)
-              continue;
-            Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
-          }
-        }
+
+      if (OpNo >= array_lengthof(InputUsed)) {
+        // More than two input vectors used!  Give up on trying to create a
+        // shuffle vector.  Insert all elements into a BUILD_VECTOR instead.
+        useBuildVector = true;
+        break;
       }
-    } while (Changed);
-  };
-  TryPeekThroughShufflesInputs(OrigMask);
-  // Proces unique inputs.
-  auto &&MakeUniqueInputs = [&Inputs, &IsConstant,
-                             NewElts](SmallVectorImpl<int> &Mask) {
-    SetVector<SDValue> UniqueInputs;
-    SetVector<SDValue> UniqueConstantInputs;
-    for (unsigned I = 0; I < array_lengthof(Inputs); ++I) {
-      if (IsConstant(Inputs[I]))
-        UniqueConstantInputs.insert(Inputs[I]);
-      else if (!Inputs[I].isUndef())
-        UniqueInputs.insert(Inputs[I]);
+
+      // Add the mask index for the new shuffle vector.
+      Ops.push_back(Idx + OpNo * NewElts);
     }
-    // Adjust mask in case of reused inputs. Also, need to insert constant
-    // inputs at first, otherwise it affects the final outcome.
-    if (UniqueInputs.size() != array_lengthof(Inputs)) {
-      auto &&UniqueVec = UniqueInputs.takeVector();
-      auto &&UniqueConstantVec = UniqueConstantInputs.takeVector();
-      unsigned ConstNum = UniqueConstantVec.size();
-      for (int &Idx : Mask) {
-        if (Idx == UndefMaskElem)
-          continue;
-        unsigned SrcRegIdx = Idx / NewElts;
-        if (Inputs[SrcRegIdx].isUndef()) {
-          Idx = UndefMaskElem;
-          continue;
-        }
-        const auto It = find(UniqueConstantVec, Inputs[SrcRegIdx]);
-        if (It != UniqueConstantVec.end()) {
-          Idx = (Idx % NewElts) +
-                NewElts * std::distance(UniqueConstantVec.begin(), It);
-          assert(Idx >= 0 && "Expected defined mask idx.");
+
+    if (useBuildVector) {
+      EVT EltVT = NewVT.getVectorElementType();
+      SmallVector<SDValue, 16> SVOps;
+
+      // Extract the input elements by hand.
+      for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
+        // The mask element.  This indexes into the input.
+        int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
+
+        // The input vector this mask element indexes into.
+        unsigned Input = (unsigned)Idx / NewElts;
+
+        if (Input >= array_lengthof(Inputs)) {
+          // The mask element is "undef" or indexes off the end of the input.
+          SVOps.push_back(DAG.getUNDEF(EltVT));
           continue;
         }
-        const auto RegIt = find(UniqueVec, Inputs[SrcRegIdx]);
-        assert(RegIt != UniqueVec.end() && "Cannot find non-const value.");
-        Idx = (Idx % NewElts) +
-              NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
-        assert(Idx >= 0 && "Expected defined mask idx.");
+
+        // Turn the index into an offset from the start of the input vector.
+        Idx -= Input * NewElts;
+
+        // Extract the vector element by hand.
+        SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
+                                    Inputs[Input],
+                                    DAG.getVectorIdxConstant(Idx, dl)));
       }
-      copy(UniqueConstantVec, std::begin(Inputs));
-      copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
+
+      // Construct the Lo/Hi output using a BUILD_VECTOR.
+      Output = DAG.getBuildVector(NewVT, dl, SVOps);
+    } else if (InputUsed[0] == -1U) {
+      // No input vectors were used!  The result is undefined.
+      Output = DAG.getUNDEF(NewVT);
+    } else {
+      SDValue Op0 = Inputs[InputUsed[0]];
+      // If only one input was used, use an undefined vector for the other.
+      SDValue Op1 = InputUsed[1] == -1U ?
+        DAG.getUNDEF(NewVT) : Inputs[InputUsed[1]];
+      // At least one input vector was used.  Create a new shuffle vector.
+      Output =  DAG.getVectorShuffle(NewVT, dl, Op0, Op1, Ops);
     }
-  };
-  MakeUniqueInputs(OrigMask);
-  SDValue OrigInputs[4];
-  copy(Inputs, std::begin(OrigInputs));
-  for (unsigned High = 0; High < 2; ++High) {
-    SDValue &Output = High ? Hi : Lo;
 
-    // Build a shuffle mask for the output, discovering on the fly which
-    // input vectors to use as shuffle operands.
-    unsigned FirstMaskIdx = High * NewElts;
-    SmallVector<int> Mask(NewElts * array_lengthof(Inputs), UndefMaskElem);
-    copy(makeArrayRef(OrigMask).slice(FirstMaskIdx, NewElts), Mask.begin());
-    assert(!Output && "Expected default initialized initial value.");
-    TryPeekThroughShufflesInputs(Mask);
-    MakeUniqueInputs(Mask);
-    SDValue TmpInputs[4];
-    copy(Inputs, std::begin(TmpInputs));
-    // Track changes in the output registers.
-    int UsedIdx = -1;
-    bool SecondIteration = false;
-    auto &&AccumulateResults = [&UsedIdx, &SecondIteration](unsigned Idx) {
-      if (UsedIdx < 0) {
-        UsedIdx = Idx;
-        return false;
-      }
-      if (UsedIdx >= 0 && static_cast<unsigned>(UsedIdx) == Idx)
-        SecondIteration = true;
-      return SecondIteration;
-    };
-    processShuffleMasks(
-        Mask, array_lengthof(Inputs), array_lengthof(Inputs),
-        /*NumOfUsedRegs=*/1,
-        [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
-        [&Output, &DAG = DAG, NewVT, &DL, &Inputs,
-         &BuildVector](ArrayRef<int> Mask, unsigned Idx) {
-          if (Inputs[Idx]->getOpcode() == ISD::BUILD_VECTOR)
-            Output = BuildVector(Inputs[Idx], Inputs[Idx], Mask);
-          else
-            Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx],
-                                          DAG.getUNDEF(NewVT), Mask);
-          Inputs[Idx] = Output;
-        },
-        [&AccumulateResults, &Output, &DAG = DAG, NewVT, &DL, &Inputs,
-         &TmpInputs,
-         &BuildVector](ArrayRef<int> Mask, unsigned Idx1, unsigned Idx2) {
-          if (AccumulateResults(Idx1)) {
-            if (Inputs[Idx1]->getOpcode() == ISD::BUILD_VECTOR &&
-                Inputs[Idx2]->getOpcode() == ISD::BUILD_VECTOR)
-              Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
-            else
-              Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx1],
-                                            Inputs[Idx2], Mask);
-          } else {
-            if (TmpInputs[Idx1]->getOpcode() == ISD::BUILD_VECTOR &&
-                TmpInputs[Idx2]->getOpcode() == ISD::BUILD_VECTOR)
-              Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
-            else
-              Output = DAG.getVectorShuffle(NewVT, DL, TmpInputs[Idx1],
-                                            TmpInputs[Idx2], Mask);
-          }
-          Inputs[Idx1] = Output;
-        });
-    copy(OrigInputs, std::begin(Inputs));
+    Ops.clear();
   }
 }
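
The restored code above decides, per output half, whether the split mask can be expressed as a shuffle of at most two of the four inputs, or has to fall back to extracting elements and rebuilding the half with a BUILD_VECTOR. A minimal standalone sketch of that discovery loop follows (plain C++, hypothetical function name, undef encoded as -1).

  #include <array>
  #include <vector>

  // Returns true and fills Ops with a two-input shuffle mask if this half of
  // the original mask touches at most two input registers; returns false when
  // a third register would be needed, i.e. the BUILD_VECTOR fallback applies.
  static bool discoverHalfInputs(const std::vector<int> &Mask,
                                 unsigned FirstMaskIdx, unsigned NewElts,
                                 unsigned NumInputs,
                                 std::array<unsigned, 2> &InputUsed,
                                 std::vector<int> &Ops) {
    InputUsed = {~0u, ~0u}; // Operand slots not yet discovered.
    for (unsigned Off = 0; Off != NewElts; ++Off) {
      int Idx = Mask[FirstMaskIdx + Off];
      unsigned Input = static_cast<unsigned>(Idx) / NewElts;
      if (Idx < 0 || Input >= NumInputs) {
        Ops.push_back(-1); // Undef, or indexes off the end of the inputs.
        continue;
      }
      Idx -= static_cast<int>(Input * NewElts); // Offset within that input.
      unsigned OpNo = 0;
      for (; OpNo != InputUsed.size(); ++OpNo) {
        if (InputUsed[OpNo] == Input)
          break; // This input is already an operand of the new shuffle.
        if (InputUsed[OpNo] == ~0u) {
          InputUsed[OpNo] = Input; // Claim a free operand slot.
          break;
        }
      }
      if (OpNo == InputUsed.size())
        return false; // More than two inputs used.
      Ops.push_back(Idx + static_cast<int>(OpNo * NewElts));
    }
    return true;
  }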
 

diff --git a/llvm/test/CodeGen/AArch64/insert-extend.ll b/llvm/test/CodeGen/AArch64/insert-extend.ll
index 1ea753e43d746..8836123a86122 100644
--- a/llvm/test/CodeGen/AArch64/insert-extend.ll
+++ b/llvm/test/CodeGen/AArch64/insert-extend.ll
@@ -104,104 +104,104 @@ define i32 @large(i8* nocapture noundef readonly %p1, i32 noundef %st1, i8* noca
 ; CHECK-NEXT:    rev64 v5.4s, v2.4s
 ; CHECK-NEXT:    add v16.4s, v0.4s, v7.4s
 ; CHECK-NEXT:    add v17.4s, v3.4s, v6.4s
-; CHECK-NEXT:    sub v3.4s, v3.4s, v6.4s
-; CHECK-NEXT:    uzp2 v6.4s, v17.4s, v16.4s
-; CHECK-NEXT:    add v19.4s, v2.4s, v5.4s
-; CHECK-NEXT:    add v20.4s, v1.4s, v4.4s
 ; CHECK-NEXT:    sub v0.4s, v0.4s, v7.4s
-; CHECK-NEXT:    trn2 v18.4s, v17.4s, v16.4s
+; CHECK-NEXT:    sub v3.4s, v3.4s, v6.4s
+; CHECK-NEXT:    uzp2 v7.4s, v17.4s, v16.4s
+; CHECK-NEXT:    zip2 v18.4s, v0.4s, v3.4s
+; CHECK-NEXT:    zip1 v0.4s, v0.4s, v3.4s
+; CHECK-NEXT:    uzp2 v3.4s, v16.4s, v17.4s
+; CHECK-NEXT:    add v20.4s, v2.4s, v5.4s
+; CHECK-NEXT:    add v21.4s, v1.4s, v4.4s
 ; CHECK-NEXT:    sub v2.4s, v2.4s, v5.4s
 ; CHECK-NEXT:    sub v1.4s, v1.4s, v4.4s
-; CHECK-NEXT:    uzp2 v4.4s, v6.4s, v17.4s
-; CHECK-NEXT:    zip1 v5.4s, v20.4s, v19.4s
-; CHECK-NEXT:    zip2 v6.4s, v20.4s, v19.4s
-; CHECK-NEXT:    zip2 v7.4s, v0.4s, v3.4s
-; CHECK-NEXT:    mov v0.s[1], v3.s[0]
-; CHECK-NEXT:    ext v3.16b, v17.16b, v17.16b, #12
-; CHECK-NEXT:    zip1 v19.4s, v1.4s, v2.4s
-; CHECK-NEXT:    mov v4.d[1], v6.d[1]
-; CHECK-NEXT:    mov v18.d[1], v5.d[1]
-; CHECK-NEXT:    ext v3.16b, v16.16b, v3.16b, #12
-; CHECK-NEXT:    mov v17.s[0], v16.s[1]
-; CHECK-NEXT:    ext v16.16b, v1.16b, v19.16b, #8
+; CHECK-NEXT:    trn2 v6.4s, v16.4s, v17.4s
+; CHECK-NEXT:    trn2 v19.4s, v17.4s, v16.4s
+; CHECK-NEXT:    zip1 v4.4s, v21.4s, v20.4s
+; CHECK-NEXT:    uzp2 v5.4s, v7.4s, v17.4s
+; CHECK-NEXT:    zip2 v7.4s, v21.4s, v20.4s
+; CHECK-NEXT:    zip1 v17.4s, v1.4s, v2.4s
+; CHECK-NEXT:    uzp2 v3.4s, v3.4s, v16.4s
+; CHECK-NEXT:    mov v6.d[1], v4.d[1]
+; CHECK-NEXT:    mov v5.d[1], v7.d[1]
+; CHECK-NEXT:    ext v16.16b, v1.16b, v17.16b, #8
+; CHECK-NEXT:    mov v3.d[1], v7.d[1]
+; CHECK-NEXT:    mov v19.d[1], v4.d[1]
 ; CHECK-NEXT:    mov v1.s[3], v2.s[2]
-; CHECK-NEXT:    add v2.4s, v4.4s, v18.4s
-; CHECK-NEXT:    mov v3.d[1], v6.d[1]
-; CHECK-NEXT:    mov v17.d[1], v5.d[1]
 ; CHECK-NEXT:    mov v0.d[1], v16.d[1]
+; CHECK-NEXT:    sub v2.4s, v6.4s, v3.4s
+; CHECK-NEXT:    add v3.4s, v5.4s, v19.4s
+; CHECK-NEXT:    mov v18.d[1], v1.d[1]
+; CHECK-NEXT:    rev64 v5.4s, v3.4s
 ; CHECK-NEXT:    rev64 v4.4s, v2.4s
-; CHECK-NEXT:    mov v7.d[1], v1.d[1]
-; CHECK-NEXT:    sub v3.4s, v17.4s, v3.4s
-; CHECK-NEXT:    add v5.4s, v2.4s, v4.4s
+; CHECK-NEXT:    sub v7.4s, v0.4s, v18.4s
+; CHECK-NEXT:    add v0.4s, v18.4s, v0.4s
+; CHECK-NEXT:    add v6.4s, v3.4s, v5.4s
+; CHECK-NEXT:    rev64 v16.4s, v7.4s
+; CHECK-NEXT:    rev64 v17.4s, v0.4s
+; CHECK-NEXT:    sub v3.4s, v3.4s, v5.4s
+; CHECK-NEXT:    rev64 v5.4s, v6.4s
+; CHECK-NEXT:    add v1.4s, v2.4s, v4.4s
+; CHECK-NEXT:    add v18.4s, v7.4s, v16.4s
+; CHECK-NEXT:    add v19.4s, v0.4s, v17.4s
+; CHECK-NEXT:    sub v7.4s, v7.4s, v16.4s
+; CHECK-NEXT:    sub v0.4s, v0.4s, v17.4s
 ; CHECK-NEXT:    sub v2.4s, v2.4s, v4.4s
-; CHECK-NEXT:    sub v4.4s, v0.4s, v7.4s
-; CHECK-NEXT:    add v0.4s, v7.4s, v0.4s
-; CHECK-NEXT:    rev64 v1.4s, v3.4s
-; CHECK-NEXT:    rev64 v6.4s, v4.4s
-; CHECK-NEXT:    rev64 v7.4s, v0.4s
-; CHECK-NEXT:    rev64 v16.4s, v5.4s
-; CHECK-NEXT:    add v17.4s, v3.4s, v1.4s
-; CHECK-NEXT:    add v18.4s, v4.4s, v6.4s
-; CHECK-NEXT:    add v19.4s, v0.4s, v7.4s
-; CHECK-NEXT:    sub v4.4s, v4.4s, v6.4s
-; CHECK-NEXT:    sub v0.4s, v0.4s, v7.4s
-; CHECK-NEXT:    sub v1.4s, v3.4s, v1.4s
-; CHECK-NEXT:    trn2 v3.4s, v16.4s, v2.4s
-; CHECK-NEXT:    ext v6.16b, v17.16b, v1.16b, #4
-; CHECK-NEXT:    ext v7.16b, v19.16b, v0.16b, #4
-; CHECK-NEXT:    ext v16.16b, v18.16b, v4.16b, #4
-; CHECK-NEXT:    ext v5.16b, v5.16b, v5.16b, #4
-; CHECK-NEXT:    rev64 v6.4s, v6.4s
-; CHECK-NEXT:    rev64 v7.4s, v7.4s
+; CHECK-NEXT:    trn2 v4.4s, v5.4s, v3.4s
+; CHECK-NEXT:    ext v5.16b, v2.16b, v1.16b, #12
+; CHECK-NEXT:    ext v16.16b, v0.16b, v19.16b, #12
+; CHECK-NEXT:    ext v17.16b, v7.16b, v18.16b, #12
+; CHECK-NEXT:    ext v6.16b, v6.16b, v6.16b, #4
+; CHECK-NEXT:    rev64 v5.4s, v5.4s
 ; CHECK-NEXT:    rev64 v16.4s, v16.4s
-; CHECK-NEXT:    mov v17.s[3], v1.s[3]
+; CHECK-NEXT:    rev64 v17.4s, v17.4s
+; CHECK-NEXT:    mov v1.s[3], v2.s[3]
 ; CHECK-NEXT:    mov v19.s[3], v0.s[3]
-; CHECK-NEXT:    mov v18.s[3], v4.s[3]
-; CHECK-NEXT:    ext v7.16b, v0.16b, v7.16b, #12
-; CHECK-NEXT:    ext v16.16b, v4.16b, v16.16b, #12
-; CHECK-NEXT:    ext v6.16b, v1.16b, v6.16b, #12
-; CHECK-NEXT:    trn2 v2.4s, v2.4s, v5.4s
-; CHECK-NEXT:    sub v20.4s, v19.4s, v7.4s
-; CHECK-NEXT:    sub v21.4s, v18.4s, v16.4s
-; CHECK-NEXT:    sub v5.4s, v17.4s, v6.4s
-; CHECK-NEXT:    mov v18.s[0], v4.s[0]
+; CHECK-NEXT:    mov v18.s[3], v7.s[3]
+; CHECK-NEXT:    ext v16.16b, v16.16b, v0.16b, #4
+; CHECK-NEXT:    ext v17.16b, v17.16b, v7.16b, #4
+; CHECK-NEXT:    ext v5.16b, v5.16b, v2.16b, #4
+; CHECK-NEXT:    trn2 v3.4s, v3.4s, v6.4s
+; CHECK-NEXT:    sub v20.4s, v19.4s, v16.4s
+; CHECK-NEXT:    sub v21.4s, v18.4s, v17.4s
+; CHECK-NEXT:    sub v6.4s, v1.4s, v5.4s
+; CHECK-NEXT:    mov v18.s[0], v7.s[0]
 ; CHECK-NEXT:    mov v19.s[0], v0.s[0]
-; CHECK-NEXT:    ext v0.16b, v2.16b, v2.16b, #4
-; CHECK-NEXT:    mov v17.s[0], v1.s[0]
-; CHECK-NEXT:    add v1.4s, v18.4s, v16.4s
-; CHECK-NEXT:    add v2.4s, v19.4s, v7.4s
-; CHECK-NEXT:    add v4.4s, v3.4s, v0.4s
-; CHECK-NEXT:    sub v0.4s, v3.4s, v0.4s
-; CHECK-NEXT:    add v3.4s, v17.4s, v6.4s
-; CHECK-NEXT:    mov v4.d[1], v0.d[1]
-; CHECK-NEXT:    mov v3.d[1], v5.d[1]
-; CHECK-NEXT:    mov v1.d[1], v21.d[1]
-; CHECK-NEXT:    mov v2.d[1], v20.d[1]
+; CHECK-NEXT:    ext v0.16b, v3.16b, v3.16b, #4
+; CHECK-NEXT:    mov v1.s[0], v2.s[0]
+; CHECK-NEXT:    add v2.4s, v18.4s, v17.4s
+; CHECK-NEXT:    add v3.4s, v19.4s, v16.4s
+; CHECK-NEXT:    add v7.4s, v4.4s, v0.4s
+; CHECK-NEXT:    sub v0.4s, v4.4s, v0.4s
+; CHECK-NEXT:    add v1.4s, v1.4s, v5.4s
+; CHECK-NEXT:    mov v7.d[1], v0.d[1]
+; CHECK-NEXT:    mov v1.d[1], v6.d[1]
+; CHECK-NEXT:    mov v2.d[1], v21.d[1]
+; CHECK-NEXT:    mov v3.d[1], v20.d[1]
 ; CHECK-NEXT:    movi v0.8h, #1
 ; CHECK-NEXT:    movi v17.2d, #0x00ffff0000ffff
-; CHECK-NEXT:    ushr v5.4s, v1.4s, #15
-; CHECK-NEXT:    ushr v6.4s, v4.4s, #15
-; CHECK-NEXT:    ushr v7.4s, v2.4s, #15
-; CHECK-NEXT:    ushr v16.4s, v3.4s, #15
-; CHECK-NEXT:    and v6.16b, v6.16b, v0.16b
+; CHECK-NEXT:    ushr v4.4s, v2.4s, #15
+; CHECK-NEXT:    ushr v5.4s, v7.4s, #15
+; CHECK-NEXT:    ushr v6.4s, v3.4s, #15
+; CHECK-NEXT:    ushr v16.4s, v1.4s, #15
+; CHECK-NEXT:    and v5.16b, v5.16b, v0.16b
 ; CHECK-NEXT:    and v16.16b, v16.16b, v0.16b
-; CHECK-NEXT:    and v7.16b, v7.16b, v0.16b
-; CHECK-NEXT:    and v0.16b, v5.16b, v0.16b
-; CHECK-NEXT:    mul v5.4s, v6.4s, v17.4s
-; CHECK-NEXT:    mul v6.4s, v16.4s, v17.4s
+; CHECK-NEXT:    and v6.16b, v6.16b, v0.16b
+; CHECK-NEXT:    and v0.16b, v4.16b, v0.16b
+; CHECK-NEXT:    mul v4.4s, v5.4s, v17.4s
+; CHECK-NEXT:    mul v5.4s, v16.4s, v17.4s
 ; CHECK-NEXT:    mul v0.4s, v0.4s, v17.4s
-; CHECK-NEXT:    mul v7.4s, v7.4s, v17.4s
-; CHECK-NEXT:    add v4.4s, v5.4s, v4.4s
+; CHECK-NEXT:    mul v6.4s, v6.4s, v17.4s
+; CHECK-NEXT:    add v7.4s, v4.4s, v7.4s
+; CHECK-NEXT:    add v1.4s, v5.4s, v1.4s
+; CHECK-NEXT:    add v2.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    add v3.4s, v6.4s, v3.4s
-; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add v2.4s, v7.4s, v2.4s
-; CHECK-NEXT:    eor v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    eor v1.16b, v2.16b, v7.16b
+; CHECK-NEXT:    eor v0.16b, v2.16b, v0.16b
 ; CHECK-NEXT:    eor v2.16b, v3.16b, v6.16b
-; CHECK-NEXT:    eor v3.16b, v4.16b, v5.16b
-; CHECK-NEXT:    add v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    eor v1.16b, v1.16b, v5.16b
+; CHECK-NEXT:    eor v3.16b, v7.16b, v4.16b
+; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
 ; CHECK-NEXT:    add v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    addv s0, v0.4s
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    lsr w9, w8, #16

diff --git a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
index 54bf5a89d5c03..ceed6391239aa 100644
--- a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
+++ b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
@@ -519,7 +519,8 @@ define <4 x i32> @shuffle3_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    zip1 v0.4s, v0.4s, v0.4s
 ; CHECK-NEXT:    mov v0.s[1], v1.s[0]
-; CHECK-NEXT:    mov v0.s[2], v2.s[0]
+; CHECK-NEXT:    dup v1.4s, v2.s[0]
+; CHECK-NEXT:    mov v0.s[2], v1.s[2]
 ; CHECK-NEXT:    ret
   %x = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %y = shufflevector <4 x i32> %c, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -592,53 +593,73 @@ define <8 x i8> @insert4_v8i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c, <16 x i8>
 }
 
 ; CHECK: .LCPI15_0:
-; CHECK: .byte 255                             // 0xff
-; CHECK: .byte 255                             // 0xff
-; CHECK: .byte 15                              // 0xf
-; CHECK: .byte 27                              // 0x1b
-; CHECK: .byte 255                             // 0xff
-; CHECK: .byte 255                             // 0xff
-; CHECK: .byte 24                              // 0x18
-; CHECK: .byte 12                              // 0xc
-; CHECK: .byte 255                             // 0xff
-; CHECK: .byte 255                             // 0xff
-; CHECK: .byte 15                              // 0xf
-; CHECK: .byte 27                              // 0x1b
-; CHECK: .byte 255                             // 0xff
-; CHECK: .byte 255                             // 0xff
-; CHECK: .byte 24                              // 0x18
-; CHECK: .byte 12                              // 0xc
+; CHECK: 	.byte	4                               // 0x4
+; CHECK: 	.byte	8                               // 0x8
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	14                              // 0xe
+; CHECK: 	.byte	3                               // 0x3
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	4                               // 0x4
+; CHECK: 	.byte	8                               // 0x8
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	14                              // 0xe
+; CHECK: 	.byte	3                               // 0x3
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	255                             // 0xff
 ; CHECK: .LCPI15_1:
-; CHECK: .byte 20                              // 0x14
-; CHECK: .byte 24                              // 0x18
-; CHECK: .byte 2                               // 0x2
-; CHECK: .byte 3                               // 0x3
-; CHECK: .byte 30                              // 0x1e
-; CHECK: .byte 19                              // 0x13
-; CHECK: .byte 6                               // 0x6
-; CHECK: .byte 7                               // 0x7
-; CHECK: .byte 20                              // 0x14
-; CHECK: .byte 24                              // 0x18
-; CHECK: .byte 10                              // 0xa
-; CHECK: .byte 11                              // 0xb
-; CHECK: .byte 30                              // 0x1e
-; CHECK: .byte 19                              // 0x13
-; CHECK: .byte 14                              // 0xe
-; CHECK: .byte 15                              // 0xf
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	15                              // 0xf
+; CHECK: 	.byte	27                              // 0x1b
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	24                              // 0x18
+; CHECK: 	.byte	12                              // 0xc
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	15                              // 0xf
+; CHECK: 	.byte	27                              // 0x1b
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	255                             // 0xff
+; CHECK: 	.byte	24                              // 0x18
+; CHECK: 	.byte	12                              // 0xc
+; CHECK: .LCPI15_2:
+; CHECK: 	.byte	16                              // 0x10
+; CHECK: 	.byte	17                              // 0x11
+; CHECK: 	.byte	2                               // 0x2
+; CHECK: 	.byte	3                               // 0x3
+; CHECK: 	.byte	20                              // 0x14
+; CHECK: 	.byte	21                              // 0x15
+; CHECK: 	.byte	6                               // 0x6
+; CHECK: 	.byte	7                               // 0x7
+; CHECK: 	.byte	24                              // 0x18
+; CHECK: 	.byte	25                              // 0x19
+; CHECK: 	.byte	10                              // 0xa
+; CHECK: 	.byte	11                              // 0xb
+; CHECK: 	.byte	28                              // 0x1c
+; CHECK: 	.byte	29                              // 0x1d
+; CHECK: 	.byte	14                              // 0xe
+; CHECK: 	.byte	15                              // 0xf
 define <16 x i8> @insert4_v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c, <16 x i8> %d) {
 ; CHECK-LABEL: insert4_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI15_0
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q31_q0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x9, .LCPI15_1
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    mov v0.d[1], v2.d[0]
 ; CHECK-NEXT:    mov v4.16b, v3.16b
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI15_0]
+; CHECK-NEXT:    adrp x8, .LCPI15_2
+; CHECK-NEXT:    ldr q5, [x9, :lo12:.LCPI15_1]
 ; CHECK-NEXT:    mov v3.16b, v1.16b
-; CHECK-NEXT:    ldr q5, [x8, :lo12:.LCPI15_0]
-; CHECK-NEXT:    adrp x8, .LCPI15_1
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
-; CHECK-NEXT:    tbl v31.16b, { v3.16b, v4.16b }, v5.16b
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI15_1]
-; CHECK-NEXT:    tbl v0.16b, { v31.16b, v0.16b }, v1.16b
+; CHECK-NEXT:    tbl v1.16b, { v0.16b }, v2.16b
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI15_2]
+; CHECK-NEXT:    tbl v0.16b, { v3.16b, v4.16b }, v5.16b
+; CHECK-NEXT:    tbl v0.16b, { v0.16b, v1.16b }, v2.16b
 ; CHECK-NEXT:    ret
   %e1 = extractelement <8 x i8> %a, i32 4
   %e2 = extractelement <8 x i8> %c, i32 0

diff --git a/llvm/test/CodeGen/AArch64/shuffles.ll b/llvm/test/CodeGen/AArch64/shuffles.ll
index d088cbfdbe575..d39ba96aee587 100644
--- a/llvm/test/CodeGen/AArch64/shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/shuffles.ll
@@ -4,19 +4,20 @@
 define <16 x i32> @test_shuf1(<16 x i32> %x, <16 x i32> %y) {
 ; CHECK-LABEL: test_shuf1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    dup v3.4s, v4.s[0]
-; CHECK-NEXT:    ext v5.16b, v6.16b, v1.16b, #4
 ; CHECK-NEXT:    uzp1 v16.4s, v1.4s, v0.4s
+; CHECK-NEXT:    ext v3.16b, v6.16b, v4.16b, #12
+; CHECK-NEXT:    zip2 v6.4s, v7.4s, v6.4s
 ; CHECK-NEXT:    uzp2 v17.4s, v2.4s, v4.4s
-; CHECK-NEXT:    mov v3.s[0], v6.s[3]
-; CHECK-NEXT:    trn2 v4.4s, v1.4s, v5.4s
-; CHECK-NEXT:    trn2 v1.4s, v16.4s, v1.4s
+; CHECK-NEXT:    trn2 v16.4s, v16.4s, v1.4s
+; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #4
+; CHECK-NEXT:    trn2 v4.4s, v7.4s, v6.4s
+; CHECK-NEXT:    rev64 v5.4s, v7.4s
 ; CHECK-NEXT:    trn1 v2.4s, v17.4s, v2.4s
-; CHECK-NEXT:    mov v3.s[2], v7.s[3]
-; CHECK-NEXT:    mov v4.s[0], v7.s[1]
-; CHECK-NEXT:    ext v1.16b, v0.16b, v1.16b, #12
-; CHECK-NEXT:    mov v2.s[3], v7.s[0]
-; CHECK-NEXT:    mov v3.s[3], v7.s[2]
+; CHECK-NEXT:    dup v6.4s, v7.s[0]
+; CHECK-NEXT:    mov v4.d[1], v1.d[1]
+; CHECK-NEXT:    mov v3.d[1], v5.d[1]
+; CHECK-NEXT:    ext v1.16b, v0.16b, v16.16b, #12
+; CHECK-NEXT:    mov v2.s[3], v6.s[3]
 ; CHECK-NEXT:    mov v0.16b, v4.16b
 ; CHECK-NEXT:    ret
   %s3 = shufflevector <16 x i32> %x, <16 x i32> %y, <16 x i32> <i32 29, i32 26, i32 7, i32 4, i32 3, i32 6, i32 5, i32 2, i32 9, i32 8, i32 17, i32 28, i32 27, i32 16, i32 31, i32 30>
@@ -27,9 +28,9 @@ define <4 x i32> @test_shuf2(<16 x i32> %x, <16 x i32> %y) {
 ; CHECK-LABEL: test_shuf2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    zip2 v0.4s, v7.4s, v6.4s
+; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #4
 ; CHECK-NEXT:    trn2 v0.4s, v7.4s, v0.4s
-; CHECK-NEXT:    mov v0.s[2], v1.s[3]
-; CHECK-NEXT:    mov v0.s[3], v1.s[0]
+; CHECK-NEXT:    mov v0.d[1], v1.d[1]
 ; CHECK-NEXT:    ret
   %s3 = shufflevector <16 x i32> %x, <16 x i32> %y, <4 x i32> <i32 29, i32 26, i32 7, i32 4>
   ret <4 x i32> %s3
@@ -50,8 +51,9 @@ define <4 x i32> @test_shuf4(<16 x i32> %x, <16 x i32> %y) {
 ; CHECK-LABEL: test_shuf4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp2 v0.4s, v2.4s, v4.4s
+; CHECK-NEXT:    dup v1.4s, v7.s[0]
 ; CHECK-NEXT:    trn1 v0.4s, v0.4s, v2.4s
-; CHECK-NEXT:    mov v0.s[3], v7.s[0]
+; CHECK-NEXT:    mov v0.s[3], v1.s[3]
 ; CHECK-NEXT:    ret
   %s3 = shufflevector <16 x i32> %x, <16 x i32> %y, <4 x i32> <i32 9, i32 8, i32 17, i32 28>
   ret <4 x i32> %s3
@@ -60,9 +62,9 @@ define <4 x i32> @test_shuf4(<16 x i32> %x, <16 x i32> %y) {
 define <4 x i32> @test_shuf5(<16 x i32> %x, <16 x i32> %y) {
 ; CHECK-LABEL: test_shuf5:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    rev64 v1.4s, v7.4s
 ; CHECK-NEXT:    ext v0.16b, v6.16b, v4.16b, #12
-; CHECK-NEXT:    mov v0.s[2], v7.s[3]
-; CHECK-NEXT:    mov v0.s[3], v7.s[2]
+; CHECK-NEXT:    mov v0.d[1], v1.d[1]
 ; CHECK-NEXT:    ret
   %s3 = shufflevector <16 x i32> %x, <16 x i32> %y, <4 x i32> <i32 27, i32 16, i32 31, i32 30>
   ret <4 x i32> %s3

diff --git a/llvm/test/CodeGen/ARM/fp16-insert-extract.ll b/llvm/test/CodeGen/ARM/fp16-insert-extract.ll
index d0cc516d14310..d95d908548b37 100644
--- a/llvm/test/CodeGen/ARM/fp16-insert-extract.ll
+++ b/llvm/test/CodeGen/ARM/fp16-insert-extract.ll
@@ -174,13 +174,15 @@ define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
 ; CHECKHARD-NEXT:    vmov r1, s0
 ; CHECKHARD-NEXT:    vmovx.f16 s12, s1
 ; CHECKHARD-NEXT:    vmov r0, s12
-; CHECKHARD-NEXT:    vrev32.16 d16, d3
-; CHECKHARD-NEXT:    vext.16 d17, d4, d5, #2
+; CHECKHARD-NEXT:    vext.16 d16, d4, d5, #2
 ; CHECKHARD-NEXT:    vmovx.f16 s12, s4
-; CHECKHARD-NEXT:    vext.16 d16, d16, d3, #1
-; CHECKHARD-NEXT:    vext.16 d16, d17, d16, #2
-; CHECKHARD-NEXT:    vext.16 d16, d16, d17, #1
-; CHECKHARD-NEXT:    vext.16 d17, d16, d16, #1
+; CHECKHARD-NEXT:    vdup.16 q11, d3[1]
+; CHECKHARD-NEXT:    vrev32.16 d17, d16
+; CHECKHARD-NEXT:    vext.16 d16, d16, d17, #3
+; CHECKHARD-NEXT:    vrev32.16 d17, d3
+; CHECKHARD-NEXT:    vext.16 d17, d17, d3, #1
+; CHECKHARD-NEXT:    vext.16 d16, d16, d17, #2
+; CHECKHARD-NEXT:    vext.16 d17, d16, d16, #2
 ; CHECKHARD-NEXT:    vmov.16 d16[0], r1
 ; CHECKHARD-NEXT:    vmov.16 d16[1], r0
 ; CHECKHARD-NEXT:    vmov r0, s3
@@ -192,38 +194,37 @@ define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
 ; CHECKHARD-NEXT:    vmov.16 d16[3], r0
 ; CHECKHARD-NEXT:    vmov r0, s2
 ; CHECKHARD-NEXT:    vmov.16 d18[0], r1
+; CHECKHARD-NEXT:    vmov r1, s8
 ; CHECKHARD-NEXT:    vmov.16 d18[1], r0
 ; CHECKHARD-NEXT:    vmov r0, s12
-; CHECKHARD-NEXT:    vdup.16 q3, d3[1]
-; CHECKHARD-NEXT:    vmov r1, s12
 ; CHECKHARD-NEXT:    vmovx.f16 s12, s9
+; CHECKHARD-NEXT:    vmov.16 d20[1], r1
 ; CHECKHARD-NEXT:    vmov.16 d18[2], r0
 ; CHECKHARD-NEXT:    vmov r0, s5
 ; CHECKHARD-NEXT:    vmov.16 d18[3], r0
-; CHECKHARD-NEXT:    vmov r0, s8
-; CHECKHARD-NEXT:    vmov.16 d19[0], r1
-; CHECKHARD-NEXT:    vmov.16 d19[1], r0
 ; CHECKHARD-NEXT:    vmov r0, s12
-; CHECKHARD-NEXT:    vmov.16 d19[2], r0
+; CHECKHARD-NEXT:    vmov.16 d20[2], r0
 ; CHECKHARD-NEXT:    vmov r0, s11
-; CHECKHARD-NEXT:    vmov.16 d19[3], r0
+; CHECKHARD-NEXT:    vmov.16 d20[3], r0
+; CHECKHARD-NEXT:    vmov r0, s10
+; CHECKHARD-NEXT:    vext.16 d20, d20, d22, #1
+; CHECKHARD-NEXT:    vdup.16 q11, d3[2]
+; CHECKHARD-NEXT:    vext.16 d19, d20, d20, #3
 ; CHECKHARD-NEXT:    vadd.f16 q8, q8, q9
 ; CHECKHARD-NEXT:    vext.16 d18, d0, d1, #2
 ; CHECKHARD-NEXT:    vmovx.f16 s0, s8
-; CHECKHARD-NEXT:    vmov r0, s0
-; CHECKHARD-NEXT:    vdup.16 q0, d3[2]
-; CHECKHARD-NEXT:    vext.16 d19, d18, d2, #3
 ; CHECKHARD-NEXT:    vmov r1, s0
-; CHECKHARD-NEXT:    vext.16 d18, d2, d18, #1
 ; CHECKHARD-NEXT:    vmovx.f16 s0, s11
+; CHECKHARD-NEXT:    vext.16 d19, d18, d2, #3
+; CHECKHARD-NEXT:    vext.16 d18, d2, d18, #1
 ; CHECKHARD-NEXT:    vext.16 d18, d18, d19, #2
 ; CHECKHARD-NEXT:    vext.16 d18, d18, d18, #1
-; CHECKHARD-NEXT:    vmov.16 d19[0], r1
-; CHECKHARD-NEXT:    vmov.16 d19[1], r0
-; CHECKHARD-NEXT:    vmov r0, s10
-; CHECKHARD-NEXT:    vmov.16 d19[2], r0
+; CHECKHARD-NEXT:    vmov.16 d20[1], r1
+; CHECKHARD-NEXT:    vmov.16 d20[2], r0
 ; CHECKHARD-NEXT:    vmov r0, s0
-; CHECKHARD-NEXT:    vmov.16 d19[3], r0
+; CHECKHARD-NEXT:    vmov.16 d20[3], r0
+; CHECKHARD-NEXT:    vext.16 d20, d20, d22, #1
+; CHECKHARD-NEXT:    vext.16 d19, d20, d20, #3
 ; CHECKHARD-NEXT:    vadd.f16 q0, q8, q9
 ; CHECKHARD-NEXT:    bx lr
 ;
@@ -232,13 +233,15 @@ define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
 ; CHECKSOFT-NEXT:    vmov r1, s0
 ; CHECKSOFT-NEXT:    vmovx.f16 s12, s1
 ; CHECKSOFT-NEXT:    vmov r0, s12
-; CHECKSOFT-NEXT:    vrev32.16 d16, d3
-; CHECKSOFT-NEXT:    vext.16 d17, d4, d5, #2
+; CHECKSOFT-NEXT:    vext.16 d16, d4, d5, #2
 ; CHECKSOFT-NEXT:    vmovx.f16 s12, s4
-; CHECKSOFT-NEXT:    vext.16 d16, d16, d3, #1
-; CHECKSOFT-NEXT:    vext.16 d16, d17, d16, #2
-; CHECKSOFT-NEXT:    vext.16 d16, d16, d17, #1
-; CHECKSOFT-NEXT:    vext.16 d17, d16, d16, #1
+; CHECKSOFT-NEXT:    vdup.16 q11, d3[1]
+; CHECKSOFT-NEXT:    vrev32.16 d17, d16
+; CHECKSOFT-NEXT:    vext.16 d16, d16, d17, #3
+; CHECKSOFT-NEXT:    vrev32.16 d17, d3
+; CHECKSOFT-NEXT:    vext.16 d17, d17, d3, #1
+; CHECKSOFT-NEXT:    vext.16 d16, d16, d17, #2
+; CHECKSOFT-NEXT:    vext.16 d17, d16, d16, #2
 ; CHECKSOFT-NEXT:    vmov.16 d16[0], r1
 ; CHECKSOFT-NEXT:    vmov.16 d16[1], r0
 ; CHECKSOFT-NEXT:    vmov r0, s3
@@ -250,38 +253,37 @@ define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
 ; CHECKSOFT-NEXT:    vmov.16 d16[3], r0
 ; CHECKSOFT-NEXT:    vmov r0, s2
 ; CHECKSOFT-NEXT:    vmov.16 d18[0], r1
+; CHECKSOFT-NEXT:    vmov r1, s8
 ; CHECKSOFT-NEXT:    vmov.16 d18[1], r0
 ; CHECKSOFT-NEXT:    vmov r0, s12
-; CHECKSOFT-NEXT:    vdup.16 q3, d3[1]
-; CHECKSOFT-NEXT:    vmov r1, s12
 ; CHECKSOFT-NEXT:    vmovx.f16 s12, s9
+; CHECKSOFT-NEXT:    vmov.16 d20[1], r1
 ; CHECKSOFT-NEXT:    vmov.16 d18[2], r0
 ; CHECKSOFT-NEXT:    vmov r0, s5
 ; CHECKSOFT-NEXT:    vmov.16 d18[3], r0
-; CHECKSOFT-NEXT:    vmov r0, s8
-; CHECKSOFT-NEXT:    vmov.16 d19[0], r1
-; CHECKSOFT-NEXT:    vmov.16 d19[1], r0
 ; CHECKSOFT-NEXT:    vmov r0, s12
-; CHECKSOFT-NEXT:    vmov.16 d19[2], r0
+; CHECKSOFT-NEXT:    vmov.16 d20[2], r0
 ; CHECKSOFT-NEXT:    vmov r0, s11
-; CHECKSOFT-NEXT:    vmov.16 d19[3], r0
+; CHECKSOFT-NEXT:    vmov.16 d20[3], r0
+; CHECKSOFT-NEXT:    vmov r0, s10
+; CHECKSOFT-NEXT:    vext.16 d20, d20, d22, #1
+; CHECKSOFT-NEXT:    vdup.16 q11, d3[2]
+; CHECKSOFT-NEXT:    vext.16 d19, d20, d20, #3
 ; CHECKSOFT-NEXT:    vadd.f16 q8, q8, q9
 ; CHECKSOFT-NEXT:    vext.16 d18, d0, d1, #2
 ; CHECKSOFT-NEXT:    vmovx.f16 s0, s8
-; CHECKSOFT-NEXT:    vmov r0, s0
-; CHECKSOFT-NEXT:    vdup.16 q0, d3[2]
-; CHECKSOFT-NEXT:    vext.16 d19, d18, d2, #3
 ; CHECKSOFT-NEXT:    vmov r1, s0
-; CHECKSOFT-NEXT:    vext.16 d18, d2, d18, #1
 ; CHECKSOFT-NEXT:    vmovx.f16 s0, s11
+; CHECKSOFT-NEXT:    vext.16 d19, d18, d2, #3
+; CHECKSOFT-NEXT:    vext.16 d18, d2, d18, #1
 ; CHECKSOFT-NEXT:    vext.16 d18, d18, d19, #2
 ; CHECKSOFT-NEXT:    vext.16 d18, d18, d18, #1
-; CHECKSOFT-NEXT:    vmov.16 d19[0], r1
-; CHECKSOFT-NEXT:    vmov.16 d19[1], r0
-; CHECKSOFT-NEXT:    vmov r0, s10
-; CHECKSOFT-NEXT:    vmov.16 d19[2], r0
+; CHECKSOFT-NEXT:    vmov.16 d20[1], r1
+; CHECKSOFT-NEXT:    vmov.16 d20[2], r0
 ; CHECKSOFT-NEXT:    vmov r0, s0
-; CHECKSOFT-NEXT:    vmov.16 d19[3], r0
+; CHECKSOFT-NEXT:    vmov.16 d20[3], r0
+; CHECKSOFT-NEXT:    vext.16 d20, d20, d22, #1
+; CHECKSOFT-NEXT:    vext.16 d19, d20, d20, #3
 ; CHECKSOFT-NEXT:    vadd.f16 q0, q8, q9
 ; CHECKSOFT-NEXT:    bx lr
 entry:

diff --git a/llvm/test/CodeGen/PowerPC/pr27078.ll b/llvm/test/CodeGen/PowerPC/pr27078.ll
index 9a1bd3e1957fe..e1532f05ea4e9 100644
--- a/llvm/test/CodeGen/PowerPC/pr27078.ll
+++ b/llvm/test/CodeGen/PowerPC/pr27078.ll
@@ -7,22 +7,23 @@ define <4 x float> @bar(float* %p, float* %q) {
 ; CHECK-NEXT:    li 5, 16
 ; CHECK-NEXT:    lxvw4x 2, 0, 3
 ; CHECK-NEXT:    lxvw4x 3, 0, 4
-; CHECK-NEXT:    addis 6, 2, .LCPI0_0@toc@ha
 ; CHECK-NEXT:    lxvw4x 0, 3, 5
 ; CHECK-NEXT:    lxvw4x 1, 4, 5
 ; CHECK-NEXT:    li 5, 32
 ; CHECK-NEXT:    xvsubsp 35, 3, 2
 ; CHECK-NEXT:    xvsubsp 34, 1, 0
 ; CHECK-NEXT:    lxvw4x 0, 3, 5
-; CHECK-NEXT:    addi 3, 6, .LCPI0_0@toc@l
 ; CHECK-NEXT:    lxvw4x 1, 4, 5
+; CHECK-NEXT:    addis 3, 2, .LCPI0_0@toc@ha
+; CHECK-NEXT:    addi 3, 3, .LCPI0_0@toc@l
 ; CHECK-NEXT:    lxvw4x 36, 0, 3
 ; CHECK-NEXT:    addis 3, 2, .LCPI0_1@toc@ha
+; CHECK-NEXT:    xvsubsp 0, 1, 0
 ; CHECK-NEXT:    addi 3, 3, .LCPI0_1@toc@l
-; CHECK-NEXT:    xvsubsp 37, 1, 0
 ; CHECK-NEXT:    vperm 2, 3, 2, 4
-; CHECK-NEXT:    lxvw4x 35, 0, 3
-; CHECK-NEXT:    vperm 2, 2, 5, 3
+; CHECK-NEXT:    lxvw4x 36, 0, 3
+; CHECK-NEXT:    xxmrghw 35, 0, 0
+; CHECK-NEXT:    vperm 2, 2, 3, 4
 ; CHECK-NEXT:    blr
   %1 = bitcast float* %p to <12 x float>*
   %2 = bitcast float* %q to <12 x float>*

diff --git a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
index 4f7e5ba2dbf80..92ed9280bc8b4 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
@@ -351,42 +351,54 @@ entry:
 define arm_aapcs_vfpcc <8 x i16> @shuffle3step_i16(<32 x i16> %src) {
 ; CHECK-LABEL: shuffle3step_i16:
 ; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d11, d12, d13}
+; CHECK-NEXT:    vpush {d11, d12, d13}
 ; CHECK-NEXT:    .vsave {d8, d9}
 ; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vmovx.f16 s12, s0
-; CHECK-NEXT:    vmov.f32 s16, s1
-; CHECK-NEXT:    vins.f16 s12, s2
-; CHECK-NEXT:    vmovx.f16 s2, s2
-; CHECK-NEXT:    vins.f16 s16, s2
-; CHECK-NEXT:    vmovx.f16 s2, s5
+; CHECK-NEXT:    vmov.f32 s12, s0
+; CHECK-NEXT:    vmovx.f16 s14, s1
+; CHECK-NEXT:    vins.f16 s12, s14
+; CHECK-NEXT:    vmovx.f16 s14, s4
+; CHECK-NEXT:    vmov.f32 s13, s3
+; CHECK-NEXT:    vmovx.f16 s15, s7
+; CHECK-NEXT:    vins.f16 s13, s14
+; CHECK-NEXT:    vmov.f32 s14, s6
+; CHECK-NEXT:    vins.f16 s14, s15
+; CHECK-NEXT:    vmovx.f16 s15, s2
+; CHECK-NEXT:    vins.f16 s1, s15
+; CHECK-NEXT:    vmovx.f16 s15, s5
 ; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmovx.f16 s13, s3
-; CHECK-NEXT:    vins.f16 s17, s2
+; CHECK-NEXT:    vmovx.f16 s0, s0
+; CHECK-NEXT:    vins.f16 s17, s15
+; CHECK-NEXT:    vmov.f32 s16, s1
+; CHECK-NEXT:    vmovx.f16 s1, s10
+; CHECK-NEXT:    vmov.f32 s15, s9
+; CHECK-NEXT:    vins.f16 s15, s1
+; CHECK-NEXT:    vmovx.f16 s1, s11
+; CHECK-NEXT:    vins.f16 s10, s1
+; CHECK-NEXT:    vmovx.f16 s1, s3
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
 ; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmovx.f16 s2, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vins.f16 s18, s2
-; CHECK-NEXT:    vmovx.f16 s2, s11
-; CHECK-NEXT:    vins.f16 s19, s2
-; CHECK-NEXT:    vmovx.f16 s2, s1
+; CHECK-NEXT:    vmovx.f16 s7, s9
+; CHECK-NEXT:    vmov.f32 s23, s10
+; CHECK-NEXT:    vmov.f32 s22, s8
 ; CHECK-NEXT:    vins.f16 s0, s2
-; CHECK-NEXT:    vmovx.f16 s2, s4
-; CHECK-NEXT:    vins.f16 s3, s2
-; CHECK-NEXT:    vmovx.f16 s2, s7
-; CHECK-NEXT:    vmovx.f16 s4, s10
-; CHECK-NEXT:    vmovx.f16 s14, s6
-; CHECK-NEXT:    vmovx.f16 s15, s9
-; CHECK-NEXT:    vins.f16 s6, s2
-; CHECK-NEXT:    vins.f16 s9, s4
-; CHECK-NEXT:    vmov.f32 s1, s3
-; CHECK-NEXT:    vins.f16 s14, s8
-; CHECK-NEXT:    vins.f16 s15, s11
-; CHECK-NEXT:    vins.f16 s13, s5
-; CHECK-NEXT:    vmov.f32 s2, s6
-; CHECK-NEXT:    vmov.f32 s3, s9
-; CHECK-NEXT:    vadd.i16 q0, q0, q3
+; CHECK-NEXT:    vins.f16 s1, s5
+; CHECK-NEXT:    vmov q6, q5
+; CHECK-NEXT:    vins.f16 s6, s8
+; CHECK-NEXT:    vins.f16 s7, s11
+; CHECK-NEXT:    vmovnb.i32 q6, q4
+; CHECK-NEXT:    vmov.f32 s19, s10
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov q2, q1
+; CHECK-NEXT:    vmovnb.i32 q2, q0
+; CHECK-NEXT:    vmov.f32 s3, s7
+; CHECK-NEXT:    vmov.f32 s2, s10
+; CHECK-NEXT:    vmov.f32 s18, s26
+; CHECK-NEXT:    vadd.i16 q0, q3, q0
 ; CHECK-NEXT:    vadd.i16 q0, q0, q4
 ; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vpop {d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
   %s1 = shufflevector <32 x i16> %src, <32 x i16> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
@@ -691,8 +703,8 @@ entry:
 define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
 ; CHECK-LABEL: shuffle3step_i8:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    vmov.u8 r0, q0[1]
 ; CHECK-NEXT:    vmov.8 q3[0], r0
 ; CHECK-NEXT:    vmov.u8 r0, q0[4]
@@ -707,14 +719,14 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
 ; CHECK-NEXT:    vmov.8 q3[5], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[3]
 ; CHECK-NEXT:    vmov.8 q3[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    vmov.8 q3[7], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[9]
-; CHECK-NEXT:    vmov.8 q4[8], r0
+; CHECK-NEXT:    vmov.8 q3[8], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[12]
-; CHECK-NEXT:    vmov.8 q4[9], r0
+; CHECK-NEXT:    vmov.8 q3[9], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[15]
-; CHECK-NEXT:    vmov.8 q4[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[2]
-; CHECK-NEXT:    vmov.8 q4[11], r0
+; CHECK-NEXT:    vmov.8 q3[10], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[5]
 ; CHECK-NEXT:    vmov.8 q4[12], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[8]
@@ -723,11 +735,11 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
 ; CHECK-NEXT:    vmov.8 q4[14], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[14]
 ; CHECK-NEXT:    vmov.8 q4[15], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[6]
-; CHECK-NEXT:    vmov.8 q3[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q0[0]
-; CHECK-NEXT:    vmov.f32 s14, s18
+; CHECK-NEXT:    vmov q5, q3
+; CHECK-NEXT:    vmov.u8 r0, q2[2]
 ; CHECK-NEXT:    vmov.f32 s15, s19
+; CHECK-NEXT:    vmov.8 q5[11], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
 ; CHECK-NEXT:    vmov.8 q4[0], r0
 ; CHECK-NEXT:    vmov.u8 r0, q0[3]
 ; CHECK-NEXT:    vmov.8 q4[1], r0
@@ -741,27 +753,29 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
 ; CHECK-NEXT:    vmov.8 q4[5], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[2]
 ; CHECK-NEXT:    vmov.8 q4[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-NEXT:    vmov.8 q4[7], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[8]
-; CHECK-NEXT:    vmov.8 q5[8], r0
+; CHECK-NEXT:    vmov.8 q4[8], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[11]
-; CHECK-NEXT:    vmov.8 q5[9], r0
+; CHECK-NEXT:    vmov.8 q4[9], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[14]
-; CHECK-NEXT:    vmov.8 q5[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[1]
-; CHECK-NEXT:    vmov.8 q5[11], r0
+; CHECK-NEXT:    vmov.8 q4[10], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[4]
+; CHECK-NEXT:    vmov.f32 s14, s22
 ; CHECK-NEXT:    vmov.8 q5[12], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[7]
+; CHECK-NEXT:    vmov q6, q4
 ; CHECK-NEXT:    vmov.8 q5[13], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[10]
 ; CHECK-NEXT:    vmov.8 q5[14], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[13]
 ; CHECK-NEXT:    vmov.8 q5[15], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[5]
-; CHECK-NEXT:    vmov.8 q4[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q0[2]
-; CHECK-NEXT:    vmov.f32 s18, s22
+; CHECK-NEXT:    vmov.u8 r0, q2[1]
+; CHECK-NEXT:    vmov.8 q6[11], r0
 ; CHECK-NEXT:    vmov.f32 s19, s23
+; CHECK-NEXT:    vmov.f32 s18, s26
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
 ; CHECK-NEXT:    vadd.i8 q3, q4, q3
 ; CHECK-NEXT:    vmov.8 q4[0], r0
 ; CHECK-NEXT:    vmov.u8 r0, q0[5]
@@ -776,14 +790,6 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
 ; CHECK-NEXT:    vmov.8 q4[5], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[4]
 ; CHECK-NEXT:    vmov.8 q4[6], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[10]
-; CHECK-NEXT:    vmov.8 q0[8], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[13]
-; CHECK-NEXT:    vmov.8 q0[9], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[0]
-; CHECK-NEXT:    vmov.8 q0[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[3]
-; CHECK-NEXT:    vmov.8 q0[11], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[6]
 ; CHECK-NEXT:    vmov.8 q0[12], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[9]
@@ -792,12 +798,20 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
 ; CHECK-NEXT:    vmov.8 q0[14], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[15]
 ; CHECK-NEXT:    vmov.8 q0[15], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-NEXT:    vmov.8 q5[8], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-NEXT:    vmov.8 q5[9], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[0]
+; CHECK-NEXT:    vmov.8 q5[10], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[3]
+; CHECK-NEXT:    vmov.8 q5[11], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[7]
 ; CHECK-NEXT:    vmov.8 q4[7], r0
-; CHECK-NEXT:    vmov.f32 s18, s2
+; CHECK-NEXT:    vmov.f32 s18, s22
 ; CHECK-NEXT:    vmov.f32 s19, s3
 ; CHECK-NEXT:    vadd.i8 q0, q3, q4
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
   %s1 = shufflevector <64 x i8> %src, <64 x i8> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
@@ -1319,42 +1333,43 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
 ; CHECKFP-LABEL: shuffle3step_f16:
 ; CHECKFP:       @ %bb.0: @ %entry
-; CHECKFP-NEXT:    .vsave {d8, d9}
-; CHECKFP-NEXT:    vpush {d8, d9}
+; CHECKFP-NEXT:    .vsave {d8, d9, d10}
+; CHECKFP-NEXT:    vpush {d8, d9, d10}
+; CHECKFP-NEXT:    vmov.f32 s12, s1
+; CHECKFP-NEXT:    vmovx.f16 s14, s2
+; CHECKFP-NEXT:    vins.f16 s12, s14
 ; CHECKFP-NEXT:    vmov.f32 s13, s4
+; CHECKFP-NEXT:    vmovx.f16 s14, s5
+; CHECKFP-NEXT:    vmov.f32 s15, s10
+; CHECKFP-NEXT:    vins.f16 s13, s14
+; CHECKFP-NEXT:    vmovx.f16 s14, s11
+; CHECKFP-NEXT:    vins.f16 s15, s14
+; CHECKFP-NEXT:    vmov.f32 s14, s7
+; CHECKFP-NEXT:    vmovx.f16 s16, s8
 ; CHECKFP-NEXT:    vmovx.f16 s4, s4
+; CHECKFP-NEXT:    vmovx.f16 s7, s7
+; CHECKFP-NEXT:    vmov.f32 s20, s6
+; CHECKFP-NEXT:    vmovx.f16 s10, s10
 ; CHECKFP-NEXT:    vmovx.f16 s17, s3
-; CHECKFP-NEXT:    vins.f16 s3, s4
-; CHECKFP-NEXT:    vmovx.f16 s4, s7
+; CHECKFP-NEXT:    vmovx.f16 s19, s9
 ; CHECKFP-NEXT:    vmovx.f16 s18, s6
+; CHECKFP-NEXT:    vins.f16 s14, s16
 ; CHECKFP-NEXT:    vmovx.f16 s16, s0
-; CHECKFP-NEXT:    vins.f16 s6, s4
-; CHECKFP-NEXT:    vmovx.f16 s14, s2
-; CHECKFP-NEXT:    vmov.f32 s12, s1
-; CHECKFP-NEXT:    vmovx.f16 s4, s10
-; CHECKFP-NEXT:    vmovx.f16 s19, s9
-; CHECKFP-NEXT:    vins.f16 s12, s14
-; CHECKFP-NEXT:    vmovx.f16 s14, s5
+; CHECKFP-NEXT:    vmovx.f16 s1, s1
+; CHECKFP-NEXT:    vins.f16 s20, s7
+; CHECKFP-NEXT:    vins.f16 s3, s4
+; CHECKFP-NEXT:    vins.f16 s9, s10
+; CHECKFP-NEXT:    vins.f16 s0, s1
 ; CHECKFP-NEXT:    vins.f16 s16, s2
-; CHECKFP-NEXT:    vmovx.f16 s2, s11
-; CHECKFP-NEXT:    vmovx.f16 s15, s8
-; CHECKFP-NEXT:    vins.f16 s18, s8
-; CHECKFP-NEXT:    vmovx.f16 s8, s1
-; CHECKFP-NEXT:    vins.f16 s9, s4
-; CHECKFP-NEXT:    vins.f16 s13, s14
-; CHECKFP-NEXT:    vmov.f32 s14, s7
-; CHECKFP-NEXT:    vins.f16 s10, s2
 ; CHECKFP-NEXT:    vmov.f32 s1, s3
-; CHECKFP-NEXT:    vins.f16 s19, s11
 ; CHECKFP-NEXT:    vins.f16 s17, s5
-; CHECKFP-NEXT:    vins.f16 s0, s8
-; CHECKFP-NEXT:    vmov.f32 s2, s6
+; CHECKFP-NEXT:    vins.f16 s19, s11
+; CHECKFP-NEXT:    vins.f16 s18, s8
+; CHECKFP-NEXT:    vmov.f32 s2, s20
 ; CHECKFP-NEXT:    vmov.f32 s3, s9
-; CHECKFP-NEXT:    vins.f16 s14, s15
-; CHECKFP-NEXT:    vmov.f32 s15, s10
 ; CHECKFP-NEXT:    vadd.f16 q0, q0, q4
 ; CHECKFP-NEXT:    vadd.f16 q0, q0, q3
-; CHECKFP-NEXT:    vpop {d8, d9}
+; CHECKFP-NEXT:    vpop {d8, d9, d10}
 ; CHECKFP-NEXT:    bx lr
 entry:
   %s1 = shufflevector <32 x half> %src, <32 x half> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 6d14b7020a1af..bf76ba3a513ca 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -287,46 +287,55 @@ entry:
 define void @vld3_v8i16(<24 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-LABEL: vld3_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vmovx.f16 s6, s2
-; CHECK-NEXT:    vmov.f32 s4, s1
-; CHECK-NEXT:    vins.f16 s4, s6
-; CHECK-NEXT:    vmovx.f16 s6, s9
-; CHECK-NEXT:    vmov.f32 s5, s8
-; CHECK-NEXT:    vmovx.f16 s7, s12
-; CHECK-NEXT:    vins.f16 s5, s6
-; CHECK-NEXT:    vmov.f32 s6, s11
-; CHECK-NEXT:    vins.f16 s6, s7
-; CHECK-NEXT:    vmovx.f16 s16, s15
-; CHECK-NEXT:    vmov.f32 s7, s14
-; CHECK-NEXT:    vmovx.f16 s17, s3
-; CHECK-NEXT:    vins.f16 s7, s16
-; CHECK-NEXT:    vmovx.f16 s16, s0
-; CHECK-NEXT:    vins.f16 s16, s2
-; CHECK-NEXT:    vmovx.f16 s2, s1
+; CHECK-NEXT:    vmov.f32 s0, s5
+; CHECK-NEXT:    vmovx.f16 s2, s6
 ; CHECK-NEXT:    vins.f16 s0, s2
-; CHECK-NEXT:    vmovx.f16 s2, s8
-; CHECK-NEXT:    vins.f16 s3, s2
-; CHECK-NEXT:    vmovx.f16 s2, s11
-; CHECK-NEXT:    vmovx.f16 s8, s14
-; CHECK-NEXT:    vmovx.f16 s18, s10
-; CHECK-NEXT:    vmovx.f16 s19, s13
-; CHECK-NEXT:    vins.f16 s10, s2
-; CHECK-NEXT:    vins.f16 s13, s8
-; CHECK-NEXT:    vmov.f32 s1, s3
-; CHECK-NEXT:    vins.f16 s18, s12
-; CHECK-NEXT:    vins.f16 s19, s15
-; CHECK-NEXT:    vins.f16 s17, s9
-; CHECK-NEXT:    vmov.f32 s2, s10
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vadd.i16 q0, q0, q4
-; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vmovx.f16 s2, s9
+; CHECK-NEXT:    vmov.f32 s1, s8
+; CHECK-NEXT:    vmovx.f16 s5, s5
+; CHECK-NEXT:    vins.f16 s1, s2
+; CHECK-NEXT:    vmov.f32 s19, s14
+; CHECK-NEXT:    vmovx.f16 s2, s15
+; CHECK-NEXT:    vmov.f32 s18, s12
+; CHECK-NEXT:    vins.f16 s19, s2
+; CHECK-NEXT:    vmov.f32 s2, s11
+; CHECK-NEXT:    vmov q5, q4
+; CHECK-NEXT:    vmov.f32 s16, s4
+; CHECK-NEXT:    vins.f16 s16, s5
+; CHECK-NEXT:    vmovx.f16 s5, s8
+; CHECK-NEXT:    vmov.f32 s17, s7
+; CHECK-NEXT:    vmovx.f16 s4, s4
+; CHECK-NEXT:    vins.f16 s17, s5
+; CHECK-NEXT:    vmovx.f16 s5, s11
+; CHECK-NEXT:    vmov.f32 s18, s10
+; CHECK-NEXT:    vmov.u16 r0, q2[5]
+; CHECK-NEXT:    vmovx.f16 s11, s13
+; CHECK-NEXT:    vins.f16 s18, s5
+; CHECK-NEXT:    vmovx.f16 s5, s7
+; CHECK-NEXT:    vmovnb.i32 q5, q0
+; CHECK-NEXT:    vmov.f32 s3, s19
+; CHECK-NEXT:    vmovx.f16 s14, s14
+; CHECK-NEXT:    vmov.f32 s19, s13
+; CHECK-NEXT:    vins.f16 s4, s6
+; CHECK-NEXT:    vins.f16 s5, s9
+; CHECK-NEXT:    vins.f16 s10, s12
+; CHECK-NEXT:    vins.f16 s11, s15
+; CHECK-NEXT:    vins.f16 s19, s14
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov q3, q2
+; CHECK-NEXT:    vmovnb.i32 q3, q1
+; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmov.f32 s6, s14
+; CHECK-NEXT:    vmov.f32 s2, s22
+; CHECK-NEXT:    vadd.i16 q1, q4, q1
+; CHECK-NEXT:    vadd.i16 q0, q1, q0
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
   %l1 = load <24 x i16>, <24 x i16>* %src, align 4
@@ -342,83 +351,101 @@ entry:
 define void @vld3_v16i16(<48 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK-LABEL: vld3_v16i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
 ; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT:    vmovx.f16 s6, s2
-; CHECK-NEXT:    vmov.f32 s4, s1
-; CHECK-NEXT:    vins.f16 s4, s6
-; CHECK-NEXT:    vmovx.f16 s6, s9
-; CHECK-NEXT:    vmov.f32 s5, s8
-; CHECK-NEXT:    vmovx.f16 s7, s12
-; CHECK-NEXT:    vins.f16 s5, s6
-; CHECK-NEXT:    vmov.f32 s6, s11
-; CHECK-NEXT:    vins.f16 s6, s7
-; CHECK-NEXT:    vmovx.f16 s16, s15
-; CHECK-NEXT:    vmov.f32 s7, s14
-; CHECK-NEXT:    vmovx.f16 s17, s3
-; CHECK-NEXT:    vins.f16 s7, s16
-; CHECK-NEXT:    vmovx.f16 s16, s0
-; CHECK-NEXT:    vins.f16 s16, s2
-; CHECK-NEXT:    vmovx.f16 s2, s1
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT:    vmov.f32 s0, s4
+; CHECK-NEXT:    vmovx.f16 s2, s5
 ; CHECK-NEXT:    vins.f16 s0, s2
 ; CHECK-NEXT:    vmovx.f16 s2, s8
-; CHECK-NEXT:    vins.f16 s3, s2
-; CHECK-NEXT:    vmovx.f16 s2, s11
-; CHECK-NEXT:    vmovx.f16 s8, s14
-; CHECK-NEXT:    vmovx.f16 s18, s10
-; CHECK-NEXT:    vmovx.f16 s19, s13
-; CHECK-NEXT:    vins.f16 s10, s2
-; CHECK-NEXT:    vins.f16 s13, s8
-; CHECK-NEXT:    vmov.f32 s1, s3
-; CHECK-NEXT:    vins.f16 s18, s12
-; CHECK-NEXT:    vins.f16 s19, s15
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
-; CHECK-NEXT:    vins.f16 s17, s9
+; CHECK-NEXT:    vmov.f32 s1, s7
+; CHECK-NEXT:    vmovx.f16 s12, s11
+; CHECK-NEXT:    vins.f16 s1, s2
 ; CHECK-NEXT:    vmov.f32 s2, s10
-; CHECK-NEXT:    vadd.i16 q0, q0, q4
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-NEXT:    vmovx.f16 s14, s18
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vins.f16 s2, s12
+; CHECK-NEXT:    vmovx.f16 s12, s6
+; CHECK-NEXT:    vins.f16 s3, s14
+; CHECK-NEXT:    vmovx.f16 s14, s19
+; CHECK-NEXT:    vins.f16 s18, s14
+; CHECK-NEXT:    vins.f16 s5, s12
+; CHECK-NEXT:    vmovx.f16 s12, s9
+; CHECK-NEXT:    vmov.f32 s13, s8
+; CHECK-NEXT:    vmovx.f16 s4, s4
+; CHECK-NEXT:    vins.f16 s13, s12
+; CHECK-NEXT:    vmov.f32 s12, s5
+; CHECK-NEXT:    vmovx.f16 s5, s7
+; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.f32 s14, s11
+; CHECK-NEXT:    vmovx.f16 s11, s17
+; CHECK-NEXT:    vmov.f32 s23, s18
+; CHECK-NEXT:    vmov.f32 s22, s16
+; CHECK-NEXT:    vins.f16 s4, s6
+; CHECK-NEXT:    vins.f16 s5, s9
+; CHECK-NEXT:    vmov q6, q5
+; CHECK-NEXT:    vins.f16 s10, s16
+; CHECK-NEXT:    vins.f16 s11, s19
+; CHECK-NEXT:    vmovnb.i32 q6, q3
+; CHECK-NEXT:    vmov.f32 s15, s18
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov q4, q2
+; CHECK-NEXT:    vmovnb.i32 q4, q1
+; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmov.f32 s6, s18
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
 ; CHECK-NEXT:    vadd.i16 q0, q0, q1
-; CHECK-NEXT:    vmovx.f16 s6, s14
-; CHECK-NEXT:    vldrw.u32 q4, [r0]
-; CHECK-NEXT:    vins.f16 s6, s8
-; CHECK-NEXT:    vmov.f32 s22, s15
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-NEXT:    vmov.f32 s14, s26
+; CHECK-NEXT:    vmovx.f16 s6, s10
+; CHECK-NEXT:    vadd.i16 q0, q0, q3
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-NEXT:    vmov.f32 s4, s9
+; CHECK-NEXT:    vmovx.f16 s7, s19
+; CHECK-NEXT:    vmov.f32 s27, s18
+; CHECK-NEXT:    vins.f16 s4, s6
+; CHECK-NEXT:    vmovx.f16 s6, s13
+; CHECK-NEXT:    vmov.f32 s5, s12
+; CHECK-NEXT:    vins.f16 s27, s7
+; CHECK-NEXT:    vmov.f32 s26, s16
+; CHECK-NEXT:    vins.f16 s5, s6
+; CHECK-NEXT:    vmov.f32 s6, s15
+; CHECK-NEXT:    vmov q7, q6
+; CHECK-NEXT:    vmov.f32 s20, s8
+; CHECK-NEXT:    vmovnb.i32 q7, q1
+; CHECK-NEXT:    vmovx.f16 s6, s9
+; CHECK-NEXT:    vins.f16 s20, s6
+; CHECK-NEXT:    vmovx.f16 s6, s12
+; CHECK-NEXT:    vmov.f32 s21, s11
 ; CHECK-NEXT:    vmovx.f16 s8, s8
-; CHECK-NEXT:    vins.f16 s22, s8
-; CHECK-NEXT:    vmovx.f16 s8, s11
-; CHECK-NEXT:    vmov.f32 s23, s10
-; CHECK-NEXT:    vmovx.f16 s4, s16
-; CHECK-NEXT:    vins.f16 s23, s8
-; CHECK-NEXT:    vmovx.f16 s8, s17
-; CHECK-NEXT:    vins.f16 s16, s8
-; CHECK-NEXT:    vmovx.f16 s8, s12
-; CHECK-NEXT:    vmovx.f16 s5, s19
-; CHECK-NEXT:    vins.f16 s19, s8
-; CHECK-NEXT:    vmovx.f16 s8, s15
-; CHECK-NEXT:    vmovx.f16 s7, s9
-; CHECK-NEXT:    vins.f16 s14, s8
-; CHECK-NEXT:    vmovx.f16 s8, s10
-; CHECK-NEXT:    vins.f16 s4, s18
-; CHECK-NEXT:    vmov.f32 s20, s17
-; CHECK-NEXT:    vmovx.f16 s18, s18
-; CHECK-NEXT:    vins.f16 s9, s8
-; CHECK-NEXT:    vins.f16 s5, s13
-; CHECK-NEXT:    vins.f16 s20, s18
-; CHECK-NEXT:    vmov.f32 s17, s19
-; CHECK-NEXT:    vins.f16 s7, s11
-; CHECK-NEXT:    vmovx.f16 s13, s13
-; CHECK-NEXT:    vmov.f32 s21, s12
-; CHECK-NEXT:    vmov.f32 s18, s14
-; CHECK-NEXT:    vins.f16 s21, s13
-; CHECK-NEXT:    vmov.f32 s19, s9
+; CHECK-NEXT:    vmovx.f16 s9, s11
+; CHECK-NEXT:    vins.f16 s21, s6
+; CHECK-NEXT:    vmovx.f16 s6, s15
+; CHECK-NEXT:    vmov.u16 r0, q3[5]
+; CHECK-NEXT:    vmovx.f16 s15, s17
+; CHECK-NEXT:    vmov.f32 s22, s14
+; CHECK-NEXT:    vins.f16 s8, s10
+; CHECK-NEXT:    vins.f16 s9, s13
+; CHECK-NEXT:    vins.f16 s14, s16
+; CHECK-NEXT:    vins.f16 s15, s19
+; CHECK-NEXT:    vins.f16 s22, s6
+; CHECK-NEXT:    vmovx.f16 s6, s18
+; CHECK-NEXT:    vmov.f32 s23, s17
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov q4, q3
+; CHECK-NEXT:    vins.f16 s23, s6
+; CHECK-NEXT:    vmovnb.i32 q4, q2
+; CHECK-NEXT:    vmov.f32 s11, s15
+; CHECK-NEXT:    vmov.f32 s10, s18
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vadd.i16 q1, q4, q1
-; CHECK-NEXT:    vadd.i16 q1, q1, q5
+; CHECK-NEXT:    vmov.f32 s6, s30
+; CHECK-NEXT:    vadd.i16 q2, q5, q2
+; CHECK-NEXT:    vmov.f32 s7, s27
+; CHECK-NEXT:    vadd.i16 q1, q2, q1
 ; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
   %l1 = load <48 x i16>, <48 x i16>* %src, align 4
@@ -584,8 +611,8 @@ entry:
 define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-LABEL: vld3_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
 ; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
@@ -603,27 +630,27 @@ define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vmov.8 q3[5], r2
 ; CHECK-NEXT:    vmov.u8 r2, q0[3]
 ; CHECK-NEXT:    vmov.8 q3[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    vmov.8 q3[7], r2
 ; CHECK-NEXT:    vmov.u8 r2, q0[9]
-; CHECK-NEXT:    vmov.8 q4[8], r2
-; CHECK-NEXT:    vmov.u8 r2, q0[12]
-; CHECK-NEXT:    vmov.8 q4[9], r2
-; CHECK-NEXT:    vmov.u8 r2, q0[15]
-; CHECK-NEXT:    vmov.8 q4[10], r2
-; CHECK-NEXT:    vmov.u8 r0, q2[2]
-; CHECK-NEXT:    vmov.8 q4[11], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[5]
+; CHECK-NEXT:    vmov.8 q3[8], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[12]
 ; CHECK-NEXT:    vmov.8 q4[12], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[8]
+; CHECK-NEXT:    vmov.8 q3[9], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
 ; CHECK-NEXT:    vmov.8 q4[13], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[11]
+; CHECK-NEXT:    vmov.8 q3[10], r2
 ; CHECK-NEXT:    vmov.8 q4[14], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[14]
 ; CHECK-NEXT:    vmov.8 q4[15], r0
-; CHECK-NEXT:    vmov.u8 r0, q0[6]
-; CHECK-NEXT:    vmov.8 q3[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[0]
-; CHECK-NEXT:    vmov.f32 s14, s18
+; CHECK-NEXT:    vmov q5, q3
+; CHECK-NEXT:    vmov.u8 r0, q2[2]
 ; CHECK-NEXT:    vmov.f32 s15, s19
+; CHECK-NEXT:    vmov.8 q5[11], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[0]
 ; CHECK-NEXT:    vmov.8 q4[0], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[3]
 ; CHECK-NEXT:    vmov.8 q4[1], r0
@@ -637,27 +664,29 @@ define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vmov.8 q4[5], r0
 ; CHECK-NEXT:    vmov.u8 r0, q0[2]
 ; CHECK-NEXT:    vmov.8 q4[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.8 q4[7], r0
 ; CHECK-NEXT:    vmov.u8 r0, q0[8]
-; CHECK-NEXT:    vmov.8 q5[8], r0
+; CHECK-NEXT:    vmov.8 q4[8], r0
 ; CHECK-NEXT:    vmov.u8 r0, q0[11]
-; CHECK-NEXT:    vmov.8 q5[9], r0
+; CHECK-NEXT:    vmov.8 q4[9], r0
 ; CHECK-NEXT:    vmov.u8 r0, q0[14]
-; CHECK-NEXT:    vmov.8 q5[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[1]
-; CHECK-NEXT:    vmov.8 q5[11], r0
+; CHECK-NEXT:    vmov.8 q4[10], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[4]
+; CHECK-NEXT:    vmov.f32 s14, s22
 ; CHECK-NEXT:    vmov.8 q5[12], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[7]
+; CHECK-NEXT:    vmov q6, q4
 ; CHECK-NEXT:    vmov.8 q5[13], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[10]
 ; CHECK-NEXT:    vmov.8 q5[14], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[13]
 ; CHECK-NEXT:    vmov.8 q5[15], r0
-; CHECK-NEXT:    vmov.u8 r0, q0[5]
-; CHECK-NEXT:    vmov.8 q4[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[2]
-; CHECK-NEXT:    vmov.f32 s18, s22
+; CHECK-NEXT:    vmov.u8 r0, q2[1]
+; CHECK-NEXT:    vmov.8 q6[11], r0
 ; CHECK-NEXT:    vmov.f32 s19, s23
+; CHECK-NEXT:    vmov.f32 s18, s26
+; CHECK-NEXT:    vmov.u8 r0, q1[2]
 ; CHECK-NEXT:    vadd.i8 q3, q4, q3
 ; CHECK-NEXT:    vmov.8 q4[0], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[5]
@@ -672,14 +701,6 @@ define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vmov.8 q4[5], r0
 ; CHECK-NEXT:    vmov.u8 r0, q0[4]
 ; CHECK-NEXT:    vmov.8 q4[6], r0
-; CHECK-NEXT:    vmov.u8 r0, q0[10]
-; CHECK-NEXT:    vmov.8 q1[8], r0
-; CHECK-NEXT:    vmov.u8 r0, q0[13]
-; CHECK-NEXT:    vmov.8 q1[9], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[0]
-; CHECK-NEXT:    vmov.8 q1[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[3]
-; CHECK-NEXT:    vmov.8 q1[11], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[6]
 ; CHECK-NEXT:    vmov.8 q1[12], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[9]
@@ -688,13 +709,21 @@ define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vmov.8 q1[14], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[15]
 ; CHECK-NEXT:    vmov.8 q1[15], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmov.8 q5[8], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.8 q5[9], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[0]
+; CHECK-NEXT:    vmov.8 q5[10], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[3]
+; CHECK-NEXT:    vmov.8 q5[11], r0
 ; CHECK-NEXT:    vmov.u8 r0, q0[7]
 ; CHECK-NEXT:    vmov.8 q4[7], r0
-; CHECK-NEXT:    vmov.f32 s18, s6
+; CHECK-NEXT:    vmov.f32 s18, s22
 ; CHECK-NEXT:    vmov.f32 s19, s7
 ; CHECK-NEXT:    vadd.i8 q0, q3, q4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
   %l1 = load <48 x i8>, <48 x i8>* %src, align 4
@@ -1092,46 +1121,47 @@ entry:
 define void @vld3_v8f16(<24 x half> *%src, <8 x half> *%dst) {
 ; CHECK-LABEL: vld3_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s5, s8
-; CHECK-NEXT:    vmovx.f16 s8, s8
-; CHECK-NEXT:    vmovx.f16 s17, s3
-; CHECK-NEXT:    vins.f16 s3, s8
-; CHECK-NEXT:    vmovx.f16 s8, s11
-; CHECK-NEXT:    vmovx.f16 s18, s10
-; CHECK-NEXT:    vmovx.f16 s16, s0
-; CHECK-NEXT:    vins.f16 s10, s8
-; CHECK-NEXT:    vmovx.f16 s6, s2
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
 ; CHECK-NEXT:    vmov.f32 s4, s1
-; CHECK-NEXT:    vmovx.f16 s8, s14
-; CHECK-NEXT:    vmovx.f16 s19, s13
+; CHECK-NEXT:    vmovx.f16 s6, s2
 ; CHECK-NEXT:    vins.f16 s4, s6
-; CHECK-NEXT:    vmovx.f16 s6, s9
-; CHECK-NEXT:    vins.f16 s16, s2
-; CHECK-NEXT:    vmovx.f16 s2, s15
-; CHECK-NEXT:    vmovx.f16 s7, s12
-; CHECK-NEXT:    vins.f16 s18, s12
-; CHECK-NEXT:    vmovx.f16 s12, s1
-; CHECK-NEXT:    vins.f16 s13, s8
+; CHECK-NEXT:    vmov.f32 s5, s12
+; CHECK-NEXT:    vmovx.f16 s6, s13
+; CHECK-NEXT:    vmov.f32 s7, s10
 ; CHECK-NEXT:    vins.f16 s5, s6
-; CHECK-NEXT:    vmov.f32 s6, s11
-; CHECK-NEXT:    vins.f16 s14, s2
+; CHECK-NEXT:    vmovx.f16 s6, s11
+; CHECK-NEXT:    vins.f16 s7, s6
+; CHECK-NEXT:    vmov.f32 s6, s15
+; CHECK-NEXT:    vmovx.f16 s16, s8
+; CHECK-NEXT:    vmovx.f16 s12, s12
+; CHECK-NEXT:    vmovx.f16 s15, s15
+; CHECK-NEXT:    vmov.f32 s20, s14
+; CHECK-NEXT:    vmovx.f16 s10, s10
+; CHECK-NEXT:    vmovx.f16 s17, s3
+; CHECK-NEXT:    vmovx.f16 s19, s9
+; CHECK-NEXT:    vmovx.f16 s18, s14
+; CHECK-NEXT:    vins.f16 s6, s16
+; CHECK-NEXT:    vmovx.f16 s16, s0
+; CHECK-NEXT:    vmovx.f16 s1, s1
+; CHECK-NEXT:    vins.f16 s20, s15
+; CHECK-NEXT:    vins.f16 s3, s12
+; CHECK-NEXT:    vins.f16 s9, s10
+; CHECK-NEXT:    vins.f16 s0, s1
+; CHECK-NEXT:    vins.f16 s16, s2
 ; CHECK-NEXT:    vmov.f32 s1, s3
-; CHECK-NEXT:    vins.f16 s19, s15
-; CHECK-NEXT:    vins.f16 s17, s9
-; CHECK-NEXT:    vins.f16 s0, s12
-; CHECK-NEXT:    vmov.f32 s2, s10
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vins.f16 s6, s7
-; CHECK-NEXT:    vmov.f32 s7, s14
+; CHECK-NEXT:    vins.f16 s17, s13
+; CHECK-NEXT:    vins.f16 s19, s11
+; CHECK-NEXT:    vins.f16 s18, s8
+; CHECK-NEXT:    vmov.f32 s2, s20
+; CHECK-NEXT:    vmov.f32 s3, s9
 ; CHECK-NEXT:    vadd.f16 q0, q0, q4
 ; CHECK-NEXT:    vadd.f16 q0, q0, q1
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vpop {d8, d9, d10}
 ; CHECK-NEXT:    bx lr
 entry:
   %l1 = load <24 x half>, <24 x half>* %src, align 4
@@ -1147,83 +1177,85 @@ entry:
 define void @vld3_v16f16(<48 x half> *%src, <16 x half> *%dst) {
 ; CHECK-LABEL: vld3_v16f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT:    vmovx.f16 s6, s2
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #64]
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #80]
 ; CHECK-NEXT:    vmov.f32 s4, s1
+; CHECK-NEXT:    vmovx.f16 s6, s2
 ; CHECK-NEXT:    vins.f16 s4, s6
-; CHECK-NEXT:    vmovx.f16 s6, s9
-; CHECK-NEXT:    vmov.f32 s5, s8
-; CHECK-NEXT:    vmovx.f16 s7, s12
+; CHECK-NEXT:    vmov.f32 s5, s12
+; CHECK-NEXT:    vmovx.f16 s6, s13
+; CHECK-NEXT:    vmov.f32 s7, s10
 ; CHECK-NEXT:    vins.f16 s5, s6
-; CHECK-NEXT:    vmov.f32 s6, s11
-; CHECK-NEXT:    vins.f16 s6, s7
-; CHECK-NEXT:    vmovx.f16 s16, s15
-; CHECK-NEXT:    vmov.f32 s7, s14
+; CHECK-NEXT:    vmovx.f16 s6, s11
+; CHECK-NEXT:    vins.f16 s7, s6
+; CHECK-NEXT:    vmov.f32 s6, s15
+; CHECK-NEXT:    vmovx.f16 s16, s8
+; CHECK-NEXT:    vmovx.f16 s12, s12
+; CHECK-NEXT:    vmovx.f16 s15, s15
+; CHECK-NEXT:    vmov.f32 s20, s14
+; CHECK-NEXT:    vmovx.f16 s10, s10
 ; CHECK-NEXT:    vmovx.f16 s17, s3
-; CHECK-NEXT:    vins.f16 s7, s16
+; CHECK-NEXT:    vmovx.f16 s19, s9
+; CHECK-NEXT:    vmovx.f16 s18, s14
+; CHECK-NEXT:    vins.f16 s6, s16
 ; CHECK-NEXT:    vmovx.f16 s16, s0
+; CHECK-NEXT:    vmovx.f16 s1, s1
+; CHECK-NEXT:    vins.f16 s20, s15
+; CHECK-NEXT:    vins.f16 s3, s12
+; CHECK-NEXT:    vins.f16 s9, s10
+; CHECK-NEXT:    vins.f16 s0, s1
 ; CHECK-NEXT:    vins.f16 s16, s2
-; CHECK-NEXT:    vmovx.f16 s2, s1
-; CHECK-NEXT:    vins.f16 s0, s2
-; CHECK-NEXT:    vmovx.f16 s2, s8
-; CHECK-NEXT:    vins.f16 s3, s2
-; CHECK-NEXT:    vmovx.f16 s2, s11
-; CHECK-NEXT:    vmovx.f16 s18, s10
-; CHECK-NEXT:    vins.f16 s10, s2
-; CHECK-NEXT:    vmovx.f16 s2, s14
-; CHECK-NEXT:    vmovx.f16 s19, s13
-; CHECK-NEXT:    vins.f16 s13, s2
+; CHECK-NEXT:    vins.f16 s17, s13
 ; CHECK-NEXT:    vmov.f32 s1, s3
-; CHECK-NEXT:    vins.f16 s18, s12
-; CHECK-NEXT:    vins.f16 s19, s15
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vins.f16 s17, s9
-; CHECK-NEXT:    vmov.f32 s2, s10
+; CHECK-NEXT:    vins.f16 s19, s11
+; CHECK-NEXT:    vins.f16 s18, s8
+; CHECK-NEXT:    vmov.f32 s3, s9
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-NEXT:    vmov.f32 s2, s20
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
 ; CHECK-NEXT:    vadd.f16 q0, q0, q4
-; CHECK-NEXT:    vadd.f16 q2, q0, q1
+; CHECK-NEXT:    vmov.f32 s20, s14
+; CHECK-NEXT:    vadd.f16 q1, q0, q1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
-; CHECK-NEXT:    vmovx.f16 s10, s2
-; CHECK-NEXT:    vmov.f32 s8, s1
-; CHECK-NEXT:    vins.f16 s8, s10
-; CHECK-NEXT:    vmovx.f16 s10, s13
-; CHECK-NEXT:    vmov.f32 s9, s12
-; CHECK-NEXT:    vmovx.f16 s11, s4
-; CHECK-NEXT:    vins.f16 s9, s10
-; CHECK-NEXT:    vmov.f32 s10, s15
-; CHECK-NEXT:    vins.f16 s10, s11
-; CHECK-NEXT:    vmovx.f16 s16, s7
-; CHECK-NEXT:    vmov.f32 s11, s6
+; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s5, s12
+; CHECK-NEXT:    vmov.f32 s4, s1
+; CHECK-NEXT:    vmovx.f16 s6, s2
+; CHECK-NEXT:    vins.f16 s4, s6
+; CHECK-NEXT:    vmovx.f16 s6, s13
+; CHECK-NEXT:    vins.f16 s5, s6
+; CHECK-NEXT:    vmov.f32 s7, s10
+; CHECK-NEXT:    vmovx.f16 s6, s11
+; CHECK-NEXT:    vmovx.f16 s16, s8
+; CHECK-NEXT:    vins.f16 s7, s6
+; CHECK-NEXT:    vmov.f32 s6, s15
+; CHECK-NEXT:    vmovx.f16 s15, s15
+; CHECK-NEXT:    vmovx.f16 s12, s12
+; CHECK-NEXT:    vmovx.f16 s10, s10
 ; CHECK-NEXT:    vmovx.f16 s17, s3
-; CHECK-NEXT:    vins.f16 s11, s16
+; CHECK-NEXT:    vmovx.f16 s19, s9
+; CHECK-NEXT:    vmovx.f16 s18, s14
+; CHECK-NEXT:    vins.f16 s6, s16
 ; CHECK-NEXT:    vmovx.f16 s16, s0
+; CHECK-NEXT:    vmovx.f16 s1, s1
+; CHECK-NEXT:    vins.f16 s20, s15
+; CHECK-NEXT:    vins.f16 s3, s12
+; CHECK-NEXT:    vins.f16 s9, s10
+; CHECK-NEXT:    vins.f16 s0, s1
 ; CHECK-NEXT:    vins.f16 s16, s2
-; CHECK-NEXT:    vmovx.f16 s2, s1
-; CHECK-NEXT:    vins.f16 s0, s2
-; CHECK-NEXT:    vmovx.f16 s2, s12
-; CHECK-NEXT:    vins.f16 s3, s2
-; CHECK-NEXT:    vmovx.f16 s2, s15
-; CHECK-NEXT:    vmovx.f16 s18, s14
-; CHECK-NEXT:    vins.f16 s14, s2
-; CHECK-NEXT:    vmovx.f16 s2, s6
-; CHECK-NEXT:    vmovx.f16 s19, s5
-; CHECK-NEXT:    vins.f16 s5, s2
 ; CHECK-NEXT:    vmov.f32 s1, s3
-; CHECK-NEXT:    vins.f16 s18, s4
-; CHECK-NEXT:    vins.f16 s19, s7
 ; CHECK-NEXT:    vins.f16 s17, s13
-; CHECK-NEXT:    vmov.f32 s2, s14
-; CHECK-NEXT:    vmov.f32 s3, s5
+; CHECK-NEXT:    vins.f16 s19, s11
+; CHECK-NEXT:    vins.f16 s18, s8
+; CHECK-NEXT:    vmov.f32 s2, s20
+; CHECK-NEXT:    vmov.f32 s3, s9
 ; CHECK-NEXT:    vadd.f16 q0, q0, q4
-; CHECK-NEXT:    vadd.f16 q0, q0, q2
+; CHECK-NEXT:    vadd.f16 q0, q0, q1
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vpop {d8, d9, d10}
 ; CHECK-NEXT:    bx lr
 entry:
   %l1 = load <48 x half>, <48 x half>* %src, align 4

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vst3.ll b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
index 4ba845540da7b..5f29a419fd0ef 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
@@ -43,23 +43,22 @@ define void @vst3_v4i32(<4 x i32> *%src, <12 x i32> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
 ; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q3, [r0]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s4, s8
-; CHECK-NEXT:    vmov r0, r2, d0
-; CHECK-NEXT:    vmov.f32 s5, s12
-; CHECK-NEXT:    vmov.f32 s7, s9
-; CHECK-NEXT:    vmov.f32 s16, s13
-; CHECK-NEXT:    vmov.32 q1[2], r0
-; CHECK-NEXT:    vmov.f32 s18, s10
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vmov.f32 s19, s14
+; CHECK-NEXT:    vmov.f32 s8, s5
+; CHECK-NEXT:    vmov.f32 s9, s1
+; CHECK-NEXT:    vmov.f32 s18, s0
 ; CHECK-NEXT:    vmov.f32 s0, s2
-; CHECK-NEXT:    vmov.32 q4[1], r2
-; CHECK-NEXT:    vmov.f32 s1, s11
-; CHECK-NEXT:    vstrw.32 q4, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s2, s15
+; CHECK-NEXT:    vmov.f32 s11, s6
+; CHECK-NEXT:    vmov.f32 s10, s14
+; CHECK-NEXT:    vmov.f32 s16, s12
+; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s17, s4
+; CHECK-NEXT:    vmov.f32 s19, s13
+; CHECK-NEXT:    vmov.f32 s1, s15
+; CHECK-NEXT:    vstrw.32 q4, [r1]
+; CHECK-NEXT:    vmov.f32 s2, s7
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #32]
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
@@ -80,48 +79,49 @@ entry:
 define void @vst3_v8i32(<8 x i32> *%src, <24 x i32> *%dst) {
 ; CHECK-LABEL: vst3_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r7, lr}
-; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vldrw.u32 q3, [r0]
-; CHECK-NEXT:    vldrw.u32 q7, [r0, #48]
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vldrw.u32 q7, [r0]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #80]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
-; CHECK-NEXT:    vmov.f32 s4, s12
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s12, s28
-; CHECK-NEXT:    vldrw.u32 q6, [r0, #16]
-; CHECK-NEXT:    vmov r2, lr, d0
-; CHECK-NEXT:    vmov r12, r3, d4
-; CHECK-NEXT:    vmov.f32 s0, s2
-; CHECK-NEXT:    vmov.f32 s2, s31
-; CHECK-NEXT:    vmov.f32 s20, s29
-; CHECK-NEXT:    vmov.f32 s9, s15
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
+; CHECK-NEXT:    vstrw.32 q7, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q6, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #64]
+; CHECK-NEXT:    vmov.f32 s8, s2
+; CHECK-NEXT:    vmov.f32 s20, s28
+; CHECK-NEXT:    vmov.f32 s9, s19
+; CHECK-NEXT:    vmov.f32 s28, s16
+; CHECK-NEXT:    vmov.f32 s31, s17
+; CHECK-NEXT:    vmov.f32 s2, s18
+; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s11, s3
+; CHECK-NEXT:    vmov.f32 s10, s15
+; CHECK-NEXT:    vmov.f32 s23, s29
+; CHECK-NEXT:    vstrw.32 q2, [r1, #80]
+; CHECK-NEXT:    vmov.f32 s22, s4
+; CHECK-NEXT:    vmov.f32 s21, s24
 ; CHECK-NEXT:    vmov.f32 s29, s12
-; CHECK-NEXT:    vmov.f32 s5, s16
-; CHECK-NEXT:    vmov.f32 s7, s13
-; CHECK-NEXT:    vmov.f32 s22, s26
-; CHECK-NEXT:    vmov.32 q1[2], r12
-; CHECK-NEXT:    vmov.f32 s23, s30
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vmov.f32 s28, s24
-; CHECK-NEXT:    vmov.32 q5[1], lr
-; CHECK-NEXT:    vmov.f32 s31, s25
-; CHECK-NEXT:    vstrw.32 q5, [r1, #64]
-; CHECK-NEXT:    vmov.f32 s12, s17
-; CHECK-NEXT:    vmov.32 q7[2], r2
-; CHECK-NEXT:    vmov.f32 s15, s18
+; CHECK-NEXT:    vstrw.32 q5, [r1]
+; CHECK-NEXT:    vmov.f32 s30, s0
+; CHECK-NEXT:    vmov.f32 s0, s13
 ; CHECK-NEXT:    vstrw.32 q7, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s1, s27
-; CHECK-NEXT:    vmov.32 q3[1], r3
-; CHECK-NEXT:    vmov.f32 s8, s10
+; CHECK-NEXT:    vmov.f32 s3, s14
+; CHECK-NEXT:    vmov.f32 s13, s5
+; CHECK-NEXT:    vstrw.32 q0, [r1, #64]
+; CHECK-NEXT:    vmov.f32 s4, s6
+; CHECK-NEXT:    vmov.f32 s12, s25
+; CHECK-NEXT:    vmov.f32 s15, s26
+; CHECK-NEXT:    vmov.f32 s14, s18
+; CHECK-NEXT:    vmov.f32 s5, s19
 ; CHECK-NEXT:    vstrw.32 q3, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s10, s19
-; CHECK-NEXT:    vstrw.32 q0, [r1, #80]
-; CHECK-NEXT:    vstrw.32 q2, [r1, #32]
+; CHECK-NEXT:    vmov.f32 s6, s27
+; CHECK-NEXT:    vstrw.32 q1, [r1, #32]
+; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    bx lr
 entry:
   %s1 = getelementptr <8 x i32>, <8 x i32>* %src, i32 0
   %l1 = load <8 x i32>, <8 x i32>* %s1, align 4
@@ -139,112 +139,110 @@ entry:
 define void @vst3_v16i32(<16 x i32> *%src, <48 x i32> *%dst) {
 ; CHECK-LABEL: vst3_v16i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, lr}
-; CHECK-NEXT:    push {r4, lr}
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    .pad #160
-; CHECK-NEXT:    sub sp, #160
+; CHECK-NEXT:    .pad #144
+; CHECK-NEXT:    sub sp, #144
+; CHECK-NEXT:    vldrw.u32 q7, [r0, #96]
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #160]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #128]
-; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vstrw.32 q3, [sp, #144] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #144]
-; CHECK-NEXT:    vmov r12, r3, d10
-; CHECK-NEXT:    vldrw.u32 q7, [r0, #176]
-; CHECK-NEXT:    vstrw.32 q3, [sp, #80] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #96]
-; CHECK-NEXT:    vldrw.u32 q6, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s8, s1
-; CHECK-NEXT:    vstrw.32 q3, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s10, s6
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #112]
-; CHECK-NEXT:    vmov.f32 s11, s2
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #128]
+; CHECK-NEXT:    vstrw.32 q7, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q7, [r0, #80]
+; CHECK-NEXT:    vstrw.32 q3, [sp, #128] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #112]
+; CHECK-NEXT:    vstrw.32 q7, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q7, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q6, [r0]
 ; CHECK-NEXT:    vstrw.32 q3, [sp, #64] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
-; CHECK-NEXT:    vmov.32 q2[1], r3
-; CHECK-NEXT:    vstrw.32 q6, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q6, [r0, #16]
-; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s20, s22
-; CHECK-NEXT:    vmov.f32 s22, s3
+; CHECK-NEXT:    vstrw.32 q7, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q7, [r0, #16]
+; CHECK-NEXT:    vmov.f32 s16, s1
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #176]
+; CHECK-NEXT:    vmov.f32 s19, s2
 ; CHECK-NEXT:    vstrw.32 q7, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s9, s0
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s11, s5
-; CHECK-NEXT:    vmov.f32 s0, s30
-; CHECK-NEXT:    vstrw.32 q2, [sp, #128] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s1, s15
+; CHECK-NEXT:    vmov.f32 s17, s9
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
+; CHECK-NEXT:    vmov.f32 s18, s26
+; CHECK-NEXT:    vldrw.u32 q7, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #144]
+; CHECK-NEXT:    vstrw.32 q4, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vmov.f32 s16, s10
+; CHECK-NEXT:    vmov.f32 s17, s27
+; CHECK-NEXT:    vmov.f32 s19, s11
+; CHECK-NEXT:    vstrw.32 q4, [r1, #32]
+; CHECK-NEXT:    vmov.f32 s16, s6
+; CHECK-NEXT:    vmov.f32 s19, s7
+; CHECK-NEXT:    vmov.f32 s17, s15
+; CHECK-NEXT:    vmov.f32 s18, s31
+; CHECK-NEXT:    vstrw.32 q4, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s18, s8
+; CHECK-NEXT:    vmov.f64 d4, d14
+; CHECK-NEXT:    vmov.f32 s2, s4
+; CHECK-NEXT:    vmov.f32 s1, s8
+; CHECK-NEXT:    vmov.f32 s4, s9
 ; CHECK-NEXT:    vldrw.u32 q2, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s2, s19
-; CHECK-NEXT:    vmov.f32 s3, s31
-; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s0, s17
-; CHECK-NEXT:    vmov.f32 s2, s14
-; CHECK-NEXT:    vmov.f32 s3, s18
-; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #144] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s21, s7
-; CHECK-NEXT:    vldrw.u32 q1, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f64 d0, d4
-; CHECK-NEXT:    vstrw.32 q5, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s22, s11
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #80] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s21, s7
+; CHECK-NEXT:    vmov.f32 s17, s0
+; CHECK-NEXT:    vmov.f32 s3, s13
+; CHECK-NEXT:    vmov.f32 s0, s12
 ; CHECK-NEXT:    vmov.f64 d14, d4
-; CHECK-NEXT:    vmov.f32 s20, s2
-; CHECK-NEXT:    vmov.f32 s23, s3
-; CHECK-NEXT:    vstrw.32 q5, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s21, s16
-; CHECK-NEXT:    vmov.f32 s23, s13
-; CHECK-NEXT:    vmov.f32 s16, s1
-; CHECK-NEXT:    vmov.f32 s13, s0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #128] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s16, s24
+; CHECK-NEXT:    vmov.f32 s19, s25
+; CHECK-NEXT:    vstrw.32 q4, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s17, s1
+; CHECK-NEXT:    vmov.f32 s12, s2
+; CHECK-NEXT:    vmov.f32 s15, s3
+; CHECK-NEXT:    vmov q0, q5
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f64 d0, d14
+; CHECK-NEXT:    vldrw.u32 q5, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s6, s14
+; CHECK-NEXT:    vmov.f32 s7, s30
+; CHECK-NEXT:    vstrw.32 q1, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q1, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s31, s1
+; CHECK-NEXT:    vmov.f64 d0, d10
+; CHECK-NEXT:    vmov.f32 s16, s5
+; CHECK-NEXT:    vmov.f32 s19, s6
+; CHECK-NEXT:    vmov.f32 s14, s7
+; CHECK-NEXT:    vmov.f32 s29, s4
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #128] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s24, s2
+; CHECK-NEXT:    vmov.f32 s30, s4
+; CHECK-NEXT:    vmov.f32 s27, s3
+; CHECK-NEXT:    vstrw.32 q7, [r1, #96]
+; CHECK-NEXT:    vmov.f32 s4, s0
+; CHECK-NEXT:    vmov.f32 s7, s1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s18, s10
+; CHECK-NEXT:    vmov.f64 d10, d0
 ; CHECK-NEXT:    vldrw.u32 q0, [sp, #64] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s18, s6
-; CHECK-NEXT:    vmov.f32 s15, s5
-; CHECK-NEXT:    vmov.f32 s5, s27
-; CHECK-NEXT:    vmov.f32 s8, s24
-; CHECK-NEXT:    vmov.f32 s6, s3
-; CHECK-NEXT:    vmov.f32 s9, s0
-; CHECK-NEXT:    vmov.f32 s24, s1
-; CHECK-NEXT:    vmov.f32 s27, s2
+; CHECK-NEXT:    vstrw.32 q4, [r1, #112]
+; CHECK-NEXT:    vstrw.32 q0, [r1, #144]
 ; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vmov r0, r3, d14
-; CHECK-NEXT:    vldrw.u32 q7, [sp, #48] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s7, s11
-; CHECK-NEXT:    vstrw.32 q0, [r1, #128]
-; CHECK-NEXT:    vmov.f32 s11, s25
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s20, s12
-; CHECK-NEXT:    vmov.32 q6[1], r3
-; CHECK-NEXT:    vmov.f32 s12, s4
-; CHECK-NEXT:    vstrw.32 q6, [r1, #64]
-; CHECK-NEXT:    vmov.f32 s4, s10
-; CHECK-NEXT:    vmov.32 q2[2], r0
-; CHECK-NEXT:    vmov r0, lr, d14
-; CHECK-NEXT:    vldrw.u32 q7, [sp, #144] @ 16-byte Reload
-; CHECK-NEXT:    vmov.32 q0[1], lr
-; CHECK-NEXT:    vmov.32 q5[2], r0
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #160]
 ; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
-; CHECK-NEXT:    vmov r2, r4, d14
-; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s13, s11
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #80] @ 16-byte Reload
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #176]
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #128] @ 16-byte Reload
-; CHECK-NEXT:    vmov.32 q3[2], r2
-; CHECK-NEXT:    vmov.32 q4[1], r4
-; CHECK-NEXT:    vmov.32 q0[2], r12
-; CHECK-NEXT:    vstrw.32 q1, [r1, #80]
-; CHECK-NEXT:    vstrw.32 q3, [r1, #96]
-; CHECK-NEXT:    vstrw.32 q4, [r1, #112]
-; CHECK-NEXT:    vstrw.32 q5, [r1, #144]
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s25, s23
+; CHECK-NEXT:    vstrw.32 q3, [r1, #128]
+; CHECK-NEXT:    vmov.f32 s26, s11
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    add sp, #160
+; CHECK-NEXT:    vmov.f32 s6, s20
+; CHECK-NEXT:    vstrw.32 q6, [r1, #80]
+; CHECK-NEXT:    vmov.f32 s5, s8
+; CHECK-NEXT:    vmov.f32 s20, s9
+; CHECK-NEXT:    vstrw.32 q1, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s23, s10
+; CHECK-NEXT:    vstrw.32 q5, [r1, #64]
+; CHECK-NEXT:    add sp, #144
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    bx lr
 entry:
   %s1 = getelementptr <16 x i32>, <16 x i32>* %src, i32 0
   %l1 = load <16 x i32>, <16 x i32>* %s1, align 4
@@ -266,21 +264,20 @@ define void @vst3_v2i16(<2 x i16> *%src, <6 x i16> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
 ; CHECK-NEXT:    push {r4, lr}
-; CHECK-NEXT:    ldrh r2, [r0, #10]
-; CHECK-NEXT:    ldrh r4, [r0, #8]
-; CHECK-NEXT:    ldrh.w r12, [r0, #2]
-; CHECK-NEXT:    ldrh.w lr, [r0]
-; CHECK-NEXT:    vmov q0[2], q0[0], r4, r2
-; CHECK-NEXT:    ldrh r3, [r0, #6]
-; CHECK-NEXT:    ldrh r0, [r0, #4]
-; CHECK-NEXT:    vmov q1[2], q1[0], r0, r3
-; CHECK-NEXT:    vmov q2, q1
-; CHECK-NEXT:    vmovnt.i32 q2, q0
-; CHECK-NEXT:    vmov q0[2], q0[0], lr, r12
-; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    ldrh r2, [r0, #6]
+; CHECK-NEXT:    ldrh.w lr, [r0, #4]
+; CHECK-NEXT:    ldrh.w r12, [r0, #8]
+; CHECK-NEXT:    vmov.16 q0[4], r2
+; CHECK-NEXT:    ldrh r3, [r0, #2]
+; CHECK-NEXT:    vmov q1[2], q1[0], lr, r2
+; CHECK-NEXT:    ldrh r4, [r0]
+; CHECK-NEXT:    ldrh r0, [r0, #10]
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov q0[2], q0[0], r4, r3
 ; CHECK-NEXT:    vmov.f32 s1, s4
 ; CHECK-NEXT:    vmov.f32 s3, s2
-; CHECK-NEXT:    vmov.32 q0[2], r4
+; CHECK-NEXT:    vmov.32 q0[2], r12
 ; CHECK-NEXT:    vstrh.32 q0, [r1]
 ; CHECK-NEXT:    str r0, [r1, #8]
 ; CHECK-NEXT:    pop {r4, pc}
@@ -343,55 +340,52 @@ entry:
 define void @vst3_v8i16(<8 x i16> *%src, <24 x i16> *%dst) {
 ; CHECK-LABEL: vst3_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s12, s7
-; CHECK-NEXT:    vmov.u16 r2, q2[5]
-; CHECK-NEXT:    vmov.16 q0[0], r2
-; CHECK-NEXT:    vins.f16 s12, s11
-; CHECK-NEXT:    vmov.f32 s1, s12
-; CHECK-NEXT:    vmov.u16 r2, q2[7]
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov.f32 s0, s8
+; CHECK-NEXT:    vmov.u16 r2, q1[1]
+; CHECK-NEXT:    vins.f16 s0, s4
+; CHECK-NEXT:    vmov.f32 s12, s9
+; CHECK-NEXT:    vins.f16 s12, s5
+; CHECK-NEXT:    vmov.16 q0[4], r2
+; CHECK-NEXT:    vmov.f32 s3, s12
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vmov.16 q0[6], r2
-; CHECK-NEXT:    vmov.f32 s2, s7
-; CHECK-NEXT:    vmov.u16 r0, q2[3]
-; CHECK-NEXT:    vmovx.f16 s7, s14
-; CHECK-NEXT:    vmov.16 q4[2], r0
-; CHECK-NEXT:    vins.f16 s0, s7
-; CHECK-NEXT:    vmovx.f16 s7, s15
-; CHECK-NEXT:    vins.f16 s3, s7
-; CHECK-NEXT:    vmov.f32 s7, s6
-; CHECK-NEXT:    vmovx.f16 s2, s2
-; CHECK-NEXT:    vins.f16 s7, s10
-; CHECK-NEXT:    vmov.f32 s20, s4
-; CHECK-NEXT:    vins.f16 s15, s2
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vins.f16 s20, s8
-; CHECK-NEXT:    vmov.f32 s7, s6
-; CHECK-NEXT:    vmovx.f16 s6, s5
-; CHECK-NEXT:    vmov.f32 s2, s15
-; CHECK-NEXT:    vmovx.f16 s15, s13
-; CHECK-NEXT:    vins.f16 s13, s6
-; CHECK-NEXT:    vmovx.f16 s6, s7
-; CHECK-NEXT:    vmov.u16 r0, q2[1]
-; CHECK-NEXT:    vmovx.f16 s4, s4
-; CHECK-NEXT:    vins.f16 s14, s6
-; CHECK-NEXT:    vmovx.f16 s6, s12
-; CHECK-NEXT:    vmov.16 q5[4], r0
-; CHECK-NEXT:    vins.f16 s5, s9
-; CHECK-NEXT:    vins.f16 s12, s4
-; CHECK-NEXT:    vins.f16 s17, s15
-; CHECK-NEXT:    vmov.f32 s16, s13
-; CHECK-NEXT:    vins.f16 s22, s6
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vstrw.32 q0, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s23, s5
-; CHECK-NEXT:    vstrw.32 q4, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s21, s12
-; CHECK-NEXT:    vstrw.32 q5, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    vmovx.f16 s8, s8
+; CHECK-NEXT:    vmovx.f16 s16, s6
+; CHECK-NEXT:    vmov.f32 s1, s12
+; CHECK-NEXT:    vins.f16 s17, s7
+; CHECK-NEXT:    vins.f16 s1, s8
+; CHECK-NEXT:    vmovx.f16 s8, s12
+; CHECK-NEXT:    vins.f16 s2, s8
+; CHECK-NEXT:    vmovx.f16 s8, s14
+; CHECK-NEXT:    vins.f16 s16, s8
+; CHECK-NEXT:    vmovx.f16 s19, s7
+; CHECK-NEXT:    vmovx.f16 s8, s15
+; CHECK-NEXT:    vmov.f32 s18, s15
+; CHECK-NEXT:    vins.f16 s19, s8
+; CHECK-NEXT:    vmovx.f16 s8, s17
+; CHECK-NEXT:    vmov.f32 s17, s11
+; CHECK-NEXT:    vmovx.f16 s12, s9
+; CHECK-NEXT:    vins.f16 s17, s8
+; CHECK-NEXT:    vmovx.f16 s8, s11
+; CHECK-NEXT:    vins.f16 s18, s8
+; CHECK-NEXT:    vmov.f32 s8, s13
+; CHECK-NEXT:    vins.f16 s8, s12
+; CHECK-NEXT:    vmovx.f16 s12, s10
+; CHECK-NEXT:    vins.f16 s14, s12
+; CHECK-NEXT:    vrev32.16 q1, q1
+; CHECK-NEXT:    vmovx.f16 s12, s13
+; CHECK-NEXT:    vmovx.f16 s4, s6
+; CHECK-NEXT:    vins.f16 s5, s12
+; CHECK-NEXT:    vmov.f32 s11, s14
+; CHECK-NEXT:    vins.f16 s10, s4
+; CHECK-NEXT:    vmov.f32 s9, s5
+; CHECK-NEXT:    vstrw.32 q4, [r1, #32]
+; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
+; CHECK-NEXT:    vstrw.32 q0, [r1]
+; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
   %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0
@@ -412,111 +406,112 @@ define void @vst3_v16i16(<16 x i16> *%src, <48 x i16> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    .pad #48
-; CHECK-NEXT:    sub sp, #48
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q7, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s0, s11
-; CHECK-NEXT:    vmov.u16 r2, q1[5]
-; CHECK-NEXT:    vmov.16 q3[0], r2
-; CHECK-NEXT:    vins.f16 s0, s7
-; CHECK-NEXT:    vmov.f32 s2, s11
-; CHECK-NEXT:    vmov.u16 r2, q1[7]
-; CHECK-NEXT:    vmov.f64 d12, d4
-; CHECK-NEXT:    vstrw.32 q1, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s26, s10
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
-; CHECK-NEXT:    vmov.f32 s13, s0
-; CHECK-NEXT:    vstrw.32 q6, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmov.16 q3[6], r2
+; CHECK-NEXT:    .pad #64
+; CHECK-NEXT:    sub sp, #64
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #80]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-NEXT:    vldrw.u32 q6, [r0]
+; CHECK-NEXT:    vstrw.32 q1, [sp, #48] @ 16-byte Spill
 ; CHECK-NEXT:    vmovx.f16 s0, s10
-; CHECK-NEXT:    vins.f16 s12, s0
-; CHECK-NEXT:    vmovx.f16 s0, s2
-; CHECK-NEXT:    vmov.f32 s14, s11
-; CHECK-NEXT:    vins.f16 s14, s0
-; CHECK-NEXT:    vmov.f32 s20, s7
-; CHECK-NEXT:    vmov q0, q3
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
-; CHECK-NEXT:    vmov.u16 r2, q3[5]
-; CHECK-NEXT:    vins.f16 s20, s15
-; CHECK-NEXT:    vmov.16 q4[0], r2
-; CHECK-NEXT:    vmov.u16 r2, q3[7]
-; CHECK-NEXT:    vmov.f32 s17, s20
-; CHECK-NEXT:    vmovx.f16 s20, s31
-; CHECK-NEXT:    vmov.16 q4[6], r2
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmovx.f16 s7, s30
-; CHECK-NEXT:    vins.f16 s16, s7
-; CHECK-NEXT:    vmovx.f16 s7, s18
-; CHECK-NEXT:    vins.f16 s31, s7
-; CHECK-NEXT:    vmovx.f16 s7, s11
-; CHECK-NEXT:    vins.f16 s3, s7
-; CHECK-NEXT:    vins.f16 s19, s20
-; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmovx.f16 s4, s6
+; CHECK-NEXT:    vins.f16 s1, s7
+; CHECK-NEXT:    vins.f16 s4, s0
+; CHECK-NEXT:    vmovx.f16 s0, s11
+; CHECK-NEXT:    vmovx.f16 s7, s7
+; CHECK-NEXT:    vmov.f32 s12, s4
+; CHECK-NEXT:    vins.f16 s7, s0
+; CHECK-NEXT:    vmovx.f16 s4, s1
+; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
+; CHECK-NEXT:    vmov.f32 s18, s11
+; CHECK-NEXT:    vmov.f32 s15, s7
+; CHECK-NEXT:    vstrw.32 q6, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s13, s3
+; CHECK-NEXT:    vins.f16 s13, s4
+; CHECK-NEXT:    vmovx.f16 s4, s3
+; CHECK-NEXT:    vins.f16 s18, s4
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
 ; CHECK-NEXT:    vmov.f32 s20, s24
-; CHECK-NEXT:    vmovx.f16 s11, s8
-; CHECK-NEXT:    vmov.f32 s7, s25
-; CHECK-NEXT:    vins.f16 s20, s0
-; CHECK-NEXT:    vmov.u16 r0, q0[1]
-; CHECK-NEXT:    vins.f16 s7, s1
-; CHECK-NEXT:    vmov.16 q5[4], r0
+; CHECK-NEXT:    vins.f16 s20, s4
+; CHECK-NEXT:    vmov.u16 r2, q1[1]
+; CHECK-NEXT:    vmov.16 q5[4], r2
+; CHECK-NEXT:    vstrw.32 q1, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s23, s25
+; CHECK-NEXT:    vmovx.f16 s4, s24
+; CHECK-NEXT:    vldrw.u32 q6, [r0, #64]
+; CHECK-NEXT:    vmov.f32 s14, s18
+; CHECK-NEXT:    vins.f16 s23, s5
+; CHECK-NEXT:    vstrw.32 q3, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s5, s24
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s28, s0
+; CHECK-NEXT:    vins.f16 s5, s4
+; CHECK-NEXT:    vmovx.f16 s4, s24
 ; CHECK-NEXT:    vmov.u16 r0, q3[1]
-; CHECK-NEXT:    vmov.f32 s23, s7
-; CHECK-NEXT:    vmovx.f16 s7, s24
-; CHECK-NEXT:    vmov.f32 s24, s4
-; CHECK-NEXT:    vins.f16 s8, s7
-; CHECK-NEXT:    vins.f16 s24, s12
-; CHECK-NEXT:    vmov.f32 s21, s8
-; CHECK-NEXT:    vmov.f32 s8, s5
-; CHECK-NEXT:    vmov.16 q6[4], r0
-; CHECK-NEXT:    vins.f16 s8, s13
-; CHECK-NEXT:    vmovx.f16 s4, s4
-; CHECK-NEXT:    vmov.f32 s27, s8
-; CHECK-NEXT:    vmovx.f16 s8, s28
-; CHECK-NEXT:    vins.f16 s28, s4
-; CHECK-NEXT:    vmov.f32 s4, s6
-; CHECK-NEXT:    vmov.u16 r0, q3[3]
-; CHECK-NEXT:    vins.f16 s4, s14
-; CHECK-NEXT:    vmov.16 q0[2], r0
-; CHECK-NEXT:    vmov.f32 s18, s31
-; CHECK-NEXT:    vmov.f32 s2, s4
-; CHECK-NEXT:    vmovx.f16 s4, s29
-; CHECK-NEXT:    vmovx.f16 s0, s5
-; CHECK-NEXT:    vins.f16 s1, s4
-; CHECK-NEXT:    vmovx.f16 s4, s6
-; CHECK-NEXT:    vins.f16 s29, s0
-; CHECK-NEXT:    vins.f16 s30, s4
+; CHECK-NEXT:    vins.f16 s28, s12
+; CHECK-NEXT:    vins.f16 s22, s4
+; CHECK-NEXT:    vmov.f32 s4, s1
+; CHECK-NEXT:    vmov.16 q7[4], r0
+; CHECK-NEXT:    vins.f16 s4, s13
+; CHECK-NEXT:    vmov.f32 s21, s5
+; CHECK-NEXT:    vmov.f32 s31, s4
 ; CHECK-NEXT:    vldrw.u32 q1, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s25, s28
+; CHECK-NEXT:    vmovx.f16 s0, s0
+; CHECK-NEXT:    vmov.f32 s29, s8
+; CHECK-NEXT:    vins.f16 s29, s0
+; CHECK-NEXT:    vmovx.f16 s0, s8
+; CHECK-NEXT:    vins.f16 s30, s0
+; CHECK-NEXT:    vmovx.f16 s4, s6
+; CHECK-NEXT:    vmovx.f16 s0, s26
+; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vins.f16 s4, s0
+; CHECK-NEXT:    vins.f16 s5, s7
+; CHECK-NEXT:    vmovx.f16 s7, s7
+; CHECK-NEXT:    vmovx.f16 s0, s27
+; CHECK-NEXT:    vins.f16 s7, s0
+; CHECK-NEXT:    vmovx.f16 s0, s5
+; CHECK-NEXT:    vmov.f32 s13, s19
+; CHECK-NEXT:    vmovx.f16 s6, s1
+; CHECK-NEXT:    vins.f16 s13, s0
+; CHECK-NEXT:    vmov.f32 s14, s27
+; CHECK-NEXT:    vmovx.f16 s0, s19
+; CHECK-NEXT:    vmov.f32 s12, s25
+; CHECK-NEXT:    vins.f16 s14, s0
+; CHECK-NEXT:    vmov.f32 s0, s9
+; CHECK-NEXT:    vins.f16 s0, s6
+; CHECK-NEXT:    vmovx.f16 s6, s2
+; CHECK-NEXT:    vins.f16 s10, s6
+; CHECK-NEXT:    vmovx.f16 s6, s9
+; CHECK-NEXT:    vmov.f32 s3, s10
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s5, s13
+; CHECK-NEXT:    vstrw.32 q7, [r1, #48]
+; CHECK-NEXT:    vrev32.16 q2, q2
+; CHECK-NEXT:    vstrw.32 q5, [r1]
+; CHECK-NEXT:    vmovx.f16 s8, s17
+; CHECK-NEXT:    vins.f16 s9, s6
+; CHECK-NEXT:    vmovx.f16 s6, s10
+; CHECK-NEXT:    vins.f16 s12, s8
+; CHECK-NEXT:    vmovx.f16 s8, s18
+; CHECK-NEXT:    vmov.f32 s10, s18
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #32] @ 16-byte Reload
 ; CHECK-NEXT:    vins.f16 s26, s8
-; CHECK-NEXT:    vmov.f32 s0, s29
-; CHECK-NEXT:    vmov.u16 r0, q1[3]
-; CHECK-NEXT:    vmov.f32 s3, s30
-; CHECK-NEXT:    vldrw.u32 q7, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vins.f16 s22, s11
-; CHECK-NEXT:    vstrw.32 q6, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s8, s30
+; CHECK-NEXT:    vmov.f32 s15, s26
+; CHECK-NEXT:    vmovx.f16 s8, s25
+; CHECK-NEXT:    vrev32.16 q6, q4
+; CHECK-NEXT:    vins.f16 s2, s6
+; CHECK-NEXT:    vins.f16 s25, s8
+; CHECK-NEXT:    vmov.f32 s1, s9
+; CHECK-NEXT:    vmovx.f16 s8, s26
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #64]
-; CHECK-NEXT:    vins.f16 s8, s6
-; CHECK-NEXT:    vmov.16 q1[2], r0
-; CHECK-NEXT:    vmov.f32 s6, s8
-; CHECK-NEXT:    vmovx.f16 s8, s9
-; CHECK-NEXT:    vmovx.f16 s4, s29
-; CHECK-NEXT:    vins.f16 s5, s8
-; CHECK-NEXT:    vmovx.f16 s8, s30
-; CHECK-NEXT:    vins.f16 s9, s4
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
 ; CHECK-NEXT:    vins.f16 s10, s8
-; CHECK-NEXT:    vmov.f32 s4, s9
-; CHECK-NEXT:    vmov.f32 s7, s10
-; CHECK-NEXT:    vstrw.32 q4, [r1, #80]
-; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
-; CHECK-NEXT:    vldrw.u32 q1, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vstrw.32 q5, [r1]
+; CHECK-NEXT:    vmov.f32 s6, s14
+; CHECK-NEXT:    vmov.f32 s14, s10
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #32]
-; CHECK-NEXT:    add sp, #48
+; CHECK-NEXT:    vmov.f32 s13, s25
+; CHECK-NEXT:    vstrw.32 q0, [r1, #80]
+; CHECK-NEXT:    vstrw.32 q3, [r1, #16]
+; CHECK-NEXT:    add sp, #64
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
@@ -633,22 +628,24 @@ define void @vst3_v8i8(<8 x i8> *%src, <24 x i8> *%dst) {
 ; CHECK-NEXT:    vldrb.u16 q2, [r0, #8]
 ; CHECK-NEXT:    vldrb.u16 q1, [r0, #16]
 ; CHECK-NEXT:    vldrb.u16 q3, [r0]
+; CHECK-NEXT:    vins.f16 s1, s11
 ; CHECK-NEXT:    vmovx.f16 s2, s6
 ; CHECK-NEXT:    vmovx.f16 s0, s10
+; CHECK-NEXT:    vmovx.f16 s3, s11
 ; CHECK-NEXT:    vins.f16 s0, s2
-; CHECK-NEXT:    vins.f16 s1, s11
 ; CHECK-NEXT:    vmovx.f16 s2, s7
-; CHECK-NEXT:    vmovx.f16 s3, s11
+; CHECK-NEXT:    vmovx.f16 s1, s1
+; CHECK-NEXT:    vmov.f32 s17, s15
 ; CHECK-NEXT:    vins.f16 s3, s2
-; CHECK-NEXT:    vmovx.f16 s16, s1
-; CHECK-NEXT:    vmov.f32 s1, s15
-; CHECK-NEXT:    vmovx.f16 s18, s15
+; CHECK-NEXT:    vins.f16 s17, s1
 ; CHECK-NEXT:    vmov.f32 s2, s7
+; CHECK-NEXT:    vmovx.f16 s1, s15
 ; CHECK-NEXT:    vmov.u16 r0, q3[0]
-; CHECK-NEXT:    vins.f16 s1, s16
-; CHECK-NEXT:    vins.f16 s2, s18
+; CHECK-NEXT:    vins.f16 s2, s1
+; CHECK-NEXT:    vmov.f32 s1, s17
 ; CHECK-NEXT:    vmov.8 q4[0], r0
 ; CHECK-NEXT:    vmov.u16 r0, q2[0]
+; CHECK-NEXT:    vstrb.16 q0, [r1, #16]
 ; CHECK-NEXT:    vmov.8 q4[1], r0
 ; CHECK-NEXT:    vmov.u16 r0, q1[0]
 ; CHECK-NEXT:    vmov.8 q4[2], r0
@@ -678,7 +675,6 @@ define void @vst3_v8i8(<8 x i8> *%src, <24 x i8> *%dst) {
 ; CHECK-NEXT:    vmov.8 q4[14], r0
 ; CHECK-NEXT:    vmov.u16 r0, q3[5]
 ; CHECK-NEXT:    vmov.8 q4[15], r0
-; CHECK-NEXT:    vstrb.16 q0, [r1, #16]
 ; CHECK-NEXT:    vstrw.32 q4, [r1]
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
@@ -701,66 +697,13 @@ define void @vst3_v16i8(<16 x i8> *%src, <48 x i8> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
 ; CHECK-NEXT:    vldrw.u32 q3, [r0]
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vmov.u8 r2, q2[5]
-; CHECK-NEXT:    vmov.8 q4[0], r2
-; CHECK-NEXT:    vmov.u8 r2, q3[6]
-; CHECK-NEXT:    vmov.8 q4[2], r2
-; CHECK-NEXT:    vmov.u8 r2, q2[6]
-; CHECK-NEXT:    vmov.8 q4[3], r2
-; CHECK-NEXT:    vmov.u8 r2, q3[7]
-; CHECK-NEXT:    vmov.8 q4[5], r2
-; CHECK-NEXT:    vmov.u8 r2, q2[7]
-; CHECK-NEXT:    vmov.8 q4[6], r2
-; CHECK-NEXT:    vmov.u8 r2, q3[8]
-; CHECK-NEXT:    vmov.8 q4[8], r2
-; CHECK-NEXT:    vmov.u8 r2, q2[8]
-; CHECK-NEXT:    vmov.8 q4[9], r2
-; CHECK-NEXT:    vmov.u8 r2, q3[9]
-; CHECK-NEXT:    vmov.8 q4[11], r2
-; CHECK-NEXT:    vmov.u8 r2, q2[9]
-; CHECK-NEXT:    vmov.8 q4[12], r2
-; CHECK-NEXT:    vmov.u8 r2, q3[10]
-; CHECK-NEXT:    vmov.8 q4[14], r2
-; CHECK-NEXT:    vmov.u8 r2, q2[10]
-; CHECK-NEXT:    vmov.8 q4[15], r2
-; CHECK-NEXT:    vmov.u8 r0, q1[5]
-; CHECK-NEXT:    vmov.u8 r2, q4[0]
-; CHECK-NEXT:    vmov.8 q0[0], r2
-; CHECK-NEXT:    vmov.8 q0[1], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[2]
-; CHECK-NEXT:    vmov.8 q0[2], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[3]
-; CHECK-NEXT:    vmov.8 q0[3], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[6]
-; CHECK-NEXT:    vmov.8 q0[4], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[5]
-; CHECK-NEXT:    vmov.8 q0[5], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[6]
-; CHECK-NEXT:    vmov.8 q0[6], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[7]
-; CHECK-NEXT:    vmov.8 q0[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[8]
-; CHECK-NEXT:    vmov.8 q0[8], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[9]
-; CHECK-NEXT:    vmov.8 q0[9], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[8]
-; CHECK-NEXT:    vmov.8 q0[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[11]
-; CHECK-NEXT:    vmov.8 q0[11], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[12]
-; CHECK-NEXT:    vmov.8 q0[12], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[9]
-; CHECK-NEXT:    vmov.8 q0[13], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[14]
-; CHECK-NEXT:    vmov.8 q0[14], r0
-; CHECK-NEXT:    vmov.u8 r0, q4[15]
-; CHECK-NEXT:    vmov.8 q0[15], r0
-; CHECK-NEXT:    vmov.u8 r0, q3[0]
-; CHECK-NEXT:    vmov.8 q5[0], r0
+; CHECK-NEXT:    vmov.u8 r3, q3[0]
 ; CHECK-NEXT:    vmov.u8 r0, q2[0]
+; CHECK-NEXT:    vmov.8 q5[0], r3
+; CHECK-NEXT:    vmov.u8 r2, q1[0]
 ; CHECK-NEXT:    vmov.8 q5[1], r0
 ; CHECK-NEXT:    vmov.u8 r0, q3[1]
 ; CHECK-NEXT:    vmov.8 q5[3], r0
@@ -775,98 +718,180 @@ define void @vst3_v16i8(<16 x i8> *%src, <48 x i8> *%dst) {
 ; CHECK-NEXT:    vmov.u8 r0, q2[3]
 ; CHECK-NEXT:    vmov.8 q5[10], r0
 ; CHECK-NEXT:    vmov.u8 r0, q3[4]
+; CHECK-NEXT:    vmov.8 q4[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[2]
 ; CHECK-NEXT:    vmov.8 q5[12], r0
 ; CHECK-NEXT:    vmov.u8 r0, q2[4]
+; CHECK-NEXT:    vmov.8 q4[8], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[3]
 ; CHECK-NEXT:    vmov.8 q5[13], r0
 ; CHECK-NEXT:    vmov.u8 r0, q3[5]
 ; CHECK-NEXT:    vmov.8 q5[15], r0
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-NEXT:    vmov.8 q4[11], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[4]
+; CHECK-NEXT:    vmov.u8 r0, q5[0]
+; CHECK-NEXT:    vmov.8 q4[14], r2
+; CHECK-NEXT:    vmov.8 q0[0], r0
+; CHECK-NEXT:    vmov.f32 s17, s4
+; CHECK-NEXT:    vmov.u8 r0, q5[1]
+; CHECK-NEXT:    vmov.8 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r2, q4[2]
+; CHECK-NEXT:    vmov.8 q0[2], r2
+; CHECK-NEXT:    vmov.u8 r0, q5[3]
+; CHECK-NEXT:    vmov.8 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[4]
+; CHECK-NEXT:    vmov.8 q0[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[5]
+; CHECK-NEXT:    vmov.8 q0[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[6]
+; CHECK-NEXT:    vmov.8 q0[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[7]
+; CHECK-NEXT:    vmov.8 q0[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[8]
+; CHECK-NEXT:    vmov.8 q0[8], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[9]
+; CHECK-NEXT:    vmov.8 q0[9], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[10]
+; CHECK-NEXT:    vmov.8 q0[10], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[11]
+; CHECK-NEXT:    vmov.8 q0[11], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[12]
+; CHECK-NEXT:    vmov.8 q0[12], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[13]
+; CHECK-NEXT:    vmov.8 q0[13], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[14]
+; CHECK-NEXT:    vmov.8 q0[14], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[15]
+; CHECK-NEXT:    vmov.8 q0[15], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[5]
+; CHECK-NEXT:    vmov.8 q5[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-NEXT:    vmov.8 q5[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[6]
+; CHECK-NEXT:    vmov.8 q5[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    vmov.8 q5[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[7]
+; CHECK-NEXT:    vmov.8 q5[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-NEXT:    vmov.8 q5[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[8]
+; CHECK-NEXT:    vmov.8 q5[9], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[8]
+; CHECK-NEXT:    vmov.8 q5[10], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[9]
+; CHECK-NEXT:    vmov.8 q5[12], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-NEXT:    vmov.8 q5[13], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[10]
+; CHECK-NEXT:    vmov.8 q5[15], r0
+; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    vmov.u8 r0, q5[0]
 ; CHECK-NEXT:    vmov.8 q4[0], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[1]
 ; CHECK-NEXT:    vmov.8 q4[1], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[0]
+; CHECK-NEXT:    vmov.u8 r0, q3[7]
+; CHECK-NEXT:    vmov.8 q6[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[8]
+; CHECK-NEXT:    vmov.8 q6[8], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[9]
+; CHECK-NEXT:    vmov.8 q6[11], r0
+; CHECK-NEXT:    vmov.f32 s24, s13
+; CHECK-NEXT:    vmov.f32 s27, s14
+; CHECK-NEXT:    vmov.u8 r0, q6[2]
 ; CHECK-NEXT:    vmov.8 q4[2], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[3]
 ; CHECK-NEXT:    vmov.8 q4[3], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[4]
 ; CHECK-NEXT:    vmov.8 q4[4], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[1]
+; CHECK-NEXT:    vmov.u8 r0, q6[5]
 ; CHECK-NEXT:    vmov.8 q4[5], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[6]
 ; CHECK-NEXT:    vmov.8 q4[6], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[7]
 ; CHECK-NEXT:    vmov.8 q4[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-NEXT:    vmov.u8 r0, q6[8]
 ; CHECK-NEXT:    vmov.8 q4[8], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[9]
 ; CHECK-NEXT:    vmov.8 q4[9], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[10]
 ; CHECK-NEXT:    vmov.8 q4[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-NEXT:    vmov.u8 r0, q6[11]
 ; CHECK-NEXT:    vmov.8 q4[11], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[12]
 ; CHECK-NEXT:    vmov.8 q4[12], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[13]
 ; CHECK-NEXT:    vmov.8 q4[13], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[4]
+; CHECK-NEXT:    vmov.u8 r0, q6[14]
 ; CHECK-NEXT:    vmov.8 q4[14], r0
 ; CHECK-NEXT:    vmov.u8 r0, q5[15]
 ; CHECK-NEXT:    vmov.8 q4[15], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[10]
 ; CHECK-NEXT:    vmov.8 q5[0], r0
 ; CHECK-NEXT:    vmov.u8 r0, q3[11]
-; CHECK-NEXT:    vmov.8 q6[1], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[11]
-; CHECK-NEXT:    vmov.8 q6[2], r0
-; CHECK-NEXT:    vmov.u8 r0, q3[12]
-; CHECK-NEXT:    vmov.8 q6[4], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[12]
-; CHECK-NEXT:    vmov.8 q6[5], r0
-; CHECK-NEXT:    vmov.u8 r0, q3[13]
-; CHECK-NEXT:    vmov.8 q6[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[13]
-; CHECK-NEXT:    vmov.8 q6[8], r0
-; CHECK-NEXT:    vmov.u8 r0, q3[14]
-; CHECK-NEXT:    vmov.8 q6[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[14]
-; CHECK-NEXT:    vmov.8 q6[11], r0
-; CHECK-NEXT:    vmov.u8 r0, q3[15]
-; CHECK-NEXT:    vmov.8 q6[13], r0
-; CHECK-NEXT:    vmov.u8 r0, q2[15]
-; CHECK-NEXT:    vmov.8 q6[14], r0
-; CHECK-NEXT:    vstrw.32 q4, [r1]
-; CHECK-NEXT:    vmov.u8 r0, q6[1]
 ; CHECK-NEXT:    vmov.8 q5[1], r0
-; CHECK-NEXT:    vmov.u8 r0, q6[2]
-; CHECK-NEXT:    vmov.8 q5[2], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[11]
 ; CHECK-NEXT:    vmov.8 q5[3], r0
-; CHECK-NEXT:    vmov.u8 r0, q6[4]
+; CHECK-NEXT:    vmov.u8 r0, q3[12]
 ; CHECK-NEXT:    vmov.8 q5[4], r0
-; CHECK-NEXT:    vmov.u8 r0, q6[5]
-; CHECK-NEXT:    vmov.8 q5[5], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[12]
 ; CHECK-NEXT:    vmov.8 q5[6], r0
-; CHECK-NEXT:    vmov.u8 r0, q6[7]
+; CHECK-NEXT:    vmov.u8 r0, q3[13]
 ; CHECK-NEXT:    vmov.8 q5[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q6[8]
-; CHECK-NEXT:    vmov.8 q5[8], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[13]
 ; CHECK-NEXT:    vmov.8 q5[9], r0
-; CHECK-NEXT:    vmov.u8 r0, q6[10]
+; CHECK-NEXT:    vmov.u8 r0, q3[14]
 ; CHECK-NEXT:    vmov.8 q5[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q6[11]
-; CHECK-NEXT:    vmov.8 q5[11], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[14]
 ; CHECK-NEXT:    vmov.8 q5[12], r0
-; CHECK-NEXT:    vmov.u8 r0, q6[13]
+; CHECK-NEXT:    vmov.u8 r0, q3[15]
 ; CHECK-NEXT:    vmov.8 q5[13], r0
-; CHECK-NEXT:    vmov.u8 r0, q6[14]
-; CHECK-NEXT:    vmov.8 q5[14], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[15]
 ; CHECK-NEXT:    vmov.8 q5[15], r0
-; CHECK-NEXT:    vstrw.32 q5, [r1, #32]
+; CHECK-NEXT:    vstrw.32 q4, [r1, #16]
+; CHECK-NEXT:    vmov.u8 r0, q5[0]
+; CHECK-NEXT:    vmov.8 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[1]
+; CHECK-NEXT:    vmov.8 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[11]
+; CHECK-NEXT:    vmov.8 q3[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[12]
+; CHECK-NEXT:    vmov.8 q3[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[13]
+; CHECK-NEXT:    vmov.8 q3[8], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[14]
+; CHECK-NEXT:    vmov.8 q3[11], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[15]
+; CHECK-NEXT:    vmov.8 q3[14], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[2]
+; CHECK-NEXT:    vmov.8 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[3]
+; CHECK-NEXT:    vmov.8 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[4]
+; CHECK-NEXT:    vmov.8 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[5]
+; CHECK-NEXT:    vmov.8 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[6]
+; CHECK-NEXT:    vmov.8 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[7]
+; CHECK-NEXT:    vmov.8 q1[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[8]
+; CHECK-NEXT:    vmov.8 q1[8], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[9]
+; CHECK-NEXT:    vmov.8 q1[9], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[10]
+; CHECK-NEXT:    vmov.8 q1[10], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[11]
+; CHECK-NEXT:    vmov.8 q1[11], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[12]
+; CHECK-NEXT:    vmov.8 q1[12], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[13]
+; CHECK-NEXT:    vmov.8 q1[13], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[14]
+; CHECK-NEXT:    vmov.8 q1[14], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[15]
+; CHECK-NEXT:    vmov.8 q1[15], r0
+; CHECK-NEXT:    vstrw.32 q1, [r1, #32]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
@@ -918,28 +943,37 @@ define void @vst3_v4i64(<4 x i64> *%src, <12 x i64> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vldrw.u32 q7, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q6, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
+; CHECK-NEXT:    vldrw.u32 q7, [r0, #32]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #80]
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vmov.f64 d6, d15
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #64]
-; CHECK-NEXT:    vmov.f64 d15, d13
-; CHECK-NEXT:    vmov.f64 d7, d1
-; CHECK-NEXT:    vmov.f64 d10, d2
-; CHECK-NEXT:    vstrw.32 q3, [r1, #80]
-; CHECK-NEXT:    vmov.f64 d11, d12
-; CHECK-NEXT:    vmov.f64 d2, d8
+; CHECK-NEXT:    vmov.f32 s16, s14
+; CHECK-NEXT:    vldrw.u32 q6, [r0, #16]
+; CHECK-NEXT:    vmov.f32 s17, s15
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
+; CHECK-NEXT:    vmov.f64 d7, d15
+; CHECK-NEXT:    vmov.f32 s18, s2
+; CHECK-NEXT:    vmov.f32 s19, s3
+; CHECK-NEXT:    vmov.f32 s20, s4
+; CHECK-NEXT:    vstrw.32 q4, [r1, #80]
+; CHECK-NEXT:    vmov.f32 s21, s5
+; CHECK-NEXT:    vmov.f32 s22, s28
+; CHECK-NEXT:    vmov.f32 s23, s29
+; CHECK-NEXT:    vmov.f32 s4, s8
 ; CHECK-NEXT:    vstrw.32 q5, [r1]
-; CHECK-NEXT:    vmov.f64 d1, d5
+; CHECK-NEXT:    vmov.f32 s5, s9
+; CHECK-NEXT:    vmov.f32 s28, s24
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
-; CHECK-NEXT:    vmov.f64 d8, d15
+; CHECK-NEXT:    vmov.f32 s29, s25
+; CHECK-NEXT:    vmov.f32 s30, s12
+; CHECK-NEXT:    vmov.f32 s31, s13
+; CHECK-NEXT:    vmov.f32 s2, s26
+; CHECK-NEXT:    vstrw.32 q7, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s3, s27
+; CHECK-NEXT:    vmov.f32 s8, s14
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #64]
-; CHECK-NEXT:    vmov.f64 d12, d4
-; CHECK-NEXT:    vstrw.32 q4, [r1, #32]
-; CHECK-NEXT:    vmov.f64 d13, d14
-; CHECK-NEXT:    vstrw.32 q6, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s9, s15
+; CHECK-NEXT:    vstrw.32 q2, [r1, #32]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
@@ -990,19 +1024,19 @@ define void @vst3_v4f32(<4 x float> *%src, <12 x float> *%dst) {
 ; CHECK-NEXT:    .vsave {d8, d9}
 ; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vldrw.u32 q3, [r0]
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s12, s1
-; CHECK-NEXT:    vmov.f32 s13, s5
+; CHECK-NEXT:    vmov.f32 s8, s1
+; CHECK-NEXT:    vmov.f32 s9, s5
 ; CHECK-NEXT:    vmov.f32 s18, s4
 ; CHECK-NEXT:    vmov.f32 s4, s6
-; CHECK-NEXT:    vmov.f32 s14, s10
-; CHECK-NEXT:    vmov.f32 s15, s2
-; CHECK-NEXT:    vmov.f32 s16, s8
-; CHECK-NEXT:    vstrw.32 q3, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s11, s2
+; CHECK-NEXT:    vmov.f32 s10, s14
+; CHECK-NEXT:    vmov.f32 s16, s12
+; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
 ; CHECK-NEXT:    vmov.f32 s17, s0
-; CHECK-NEXT:    vmov.f32 s19, s9
-; CHECK-NEXT:    vmov.f32 s5, s11
+; CHECK-NEXT:    vmov.f32 s19, s13
+; CHECK-NEXT:    vmov.f32 s5, s15
 ; CHECK-NEXT:    vstrw.32 q4, [r1]
 ; CHECK-NEXT:    vmov.f32 s6, s3
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #32]
@@ -1027,47 +1061,45 @@ define void @vst3_v8f32(<8 x float> *%src, <24 x float> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    .pad #32
-; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vldrw.u32 q6, [r0]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #80]
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #16]
 ; CHECK-NEXT:    vldrw.u32 q2, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s23, s3
-; CHECK-NEXT:    vldrw.u32 q6, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s20, s2
-; CHECK-NEXT:    vmov.f32 s21, s15
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s22, s11
-; CHECK-NEXT:    vldrw.u32 q7, [r0, #64]
-; CHECK-NEXT:    vstrw.32 q5, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s0, s12
-; CHECK-NEXT:    vmov.f32 s1, s8
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vmov.f32 s2, s24
-; CHECK-NEXT:    vstrw.32 q0, [r1, #48]
-; CHECK-NEXT:    vldrw.u32 q0, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s20, s4
-; CHECK-NEXT:    vmov.f32 s23, s5
-; CHECK-NEXT:    vstrw.32 q0, [r1, #80]
-; CHECK-NEXT:    vmov.f32 s12, s9
-; CHECK-NEXT:    vmov.f32 s15, s10
-; CHECK-NEXT:    vmov.f32 s13, s25
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vstrw.32 q3, [r1, #64]
-; CHECK-NEXT:    vmov.f32 s21, s16
-; CHECK-NEXT:    vmov.f32 s22, s28
-; CHECK-NEXT:    vmov.f32 s8, s30
+; CHECK-NEXT:    vstrw.32 q6, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q7, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #64]
+; CHECK-NEXT:    vmov.f32 s12, s2
+; CHECK-NEXT:    vmov.f32 s20, s24
+; CHECK-NEXT:    vmov.f32 s13, s19
+; CHECK-NEXT:    vmov.f32 s24, s16
+; CHECK-NEXT:    vmov.f32 s27, s17
+; CHECK-NEXT:    vmov.f32 s2, s18
+; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s15, s3
+; CHECK-NEXT:    vmov.f32 s14, s11
+; CHECK-NEXT:    vmov.f32 s23, s25
+; CHECK-NEXT:    vstrw.32 q3, [r1, #80]
+; CHECK-NEXT:    vmov.f32 s22, s4
+; CHECK-NEXT:    vmov.f32 s21, s28
+; CHECK-NEXT:    vmov.f32 s25, s8
 ; CHECK-NEXT:    vstrw.32 q5, [r1]
-; CHECK-NEXT:    vmov.f32 s10, s19
-; CHECK-NEXT:    vmov.f32 s11, s31
-; CHECK-NEXT:    vmov.f32 s5, s29
-; CHECK-NEXT:    vstrw.32 q2, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s4, s17
-; CHECK-NEXT:    vmov.f32 s7, s18
-; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
-; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vmov.f32 s26, s0
+; CHECK-NEXT:    vmov.f32 s0, s9
+; CHECK-NEXT:    vstrw.32 q6, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s3, s10
+; CHECK-NEXT:    vmov.f32 s9, s5
+; CHECK-NEXT:    vstrw.32 q0, [r1, #64]
+; CHECK-NEXT:    vmov.f32 s4, s6
+; CHECK-NEXT:    vmov.f32 s8, s29
+; CHECK-NEXT:    vmov.f32 s11, s30
+; CHECK-NEXT:    vmov.f32 s10, s18
+; CHECK-NEXT:    vmov.f32 s5, s19
+; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s6, s31
+; CHECK-NEXT:    vstrw.32 q1, [r1, #32]
+; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
@@ -1089,98 +1121,106 @@ define void @vst3_v16f32(<16 x float> *%src, <48 x float> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    .pad #128
-; CHECK-NEXT:    sub sp, #128
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #176]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #128]
+; CHECK-NEXT:    .pad #144
+; CHECK-NEXT:    sub sp, #144
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #112]
+; CHECK-NEXT:    vldrw.u32 q7, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #128]
+; CHECK-NEXT:    vstrw.32 q5, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #96]
+; CHECK-NEXT:    vldrw.u32 q6, [r0]
+; CHECK-NEXT:    vstrw.32 q7, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vstrw.32 q5, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #80]
+; CHECK-NEXT:    vldrw.u32 q7, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #160]
+; CHECK-NEXT:    vstrw.32 q5, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s12, s1
+; CHECK-NEXT:    vstrw.32 q7, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s13, s9
+; CHECK-NEXT:    vmov.f32 s15, s2
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #176]
+; CHECK-NEXT:    vmov.f32 s14, s26
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #48]
+; CHECK-NEXT:    vldrw.u32 q7, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q4, [sp, #128] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #144]
+; CHECK-NEXT:    vstrw.32 q3, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s14, s3
+; CHECK-NEXT:    vmov.f32 s12, s10
+; CHECK-NEXT:    vmov.f32 s13, s27
+; CHECK-NEXT:    vmov.f32 s15, s11
+; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-NEXT:    vmov.f32 s13, s23
+; CHECK-NEXT:    vmov.f32 s12, s6
+; CHECK-NEXT:    vmov.f32 s15, s7
+; CHECK-NEXT:    vmov.f32 s14, s31
 ; CHECK-NEXT:    vstrw.32 q3, [sp, #112] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #160]
-; CHECK-NEXT:    vmov.f32 s24, s9
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #144]
+; CHECK-NEXT:    vmov.f32 s13, s0
+; CHECK-NEXT:    vmov.f32 s14, s8
+; CHECK-NEXT:    vmov.f64 d4, d14
+; CHECK-NEXT:    vmov.f32 s0, s20
+; CHECK-NEXT:    vmov.f32 s3, s21
+; CHECK-NEXT:    vmov.f64 d10, d2
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s2, s20
+; CHECK-NEXT:    vmov.f32 s1, s8
+; CHECK-NEXT:    vmov.f64 d14, d2
+; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #128] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s20, s9
+; CHECK-NEXT:    vldrw.u32 q2, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s23, s30
+; CHECK-NEXT:    vmov.f32 s12, s24
+; CHECK-NEXT:    vstrw.32 q5, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s15, s25
 ; CHECK-NEXT:    vstrw.32 q3, [sp, #96] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #96]
-; CHECK-NEXT:    vmov.f32 s26, s6
-; CHECK-NEXT:    vldrw.u32 q7, [r0, #112]
-; CHECK-NEXT:    vstrw.32 q3, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s27, s10
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #48]
-; CHECK-NEXT:    vstrw.32 q3, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s25, s1
-; CHECK-NEXT:    vstrw.32 q3, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
-; CHECK-NEXT:    vstrw.32 q6, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s21, s1
+; CHECK-NEXT:    vmov.f32 s12, s2
+; CHECK-NEXT:    vmov.f32 s15, s3
+; CHECK-NEXT:    vmov q0, q4
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f64 d0, d14
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s31, s1
+; CHECK-NEXT:    vmov.f64 d0, d8
+; CHECK-NEXT:    vmov.f32 s20, s9
+; CHECK-NEXT:    vmov.f32 s23, s10
+; CHECK-NEXT:    vmov.f32 s14, s11
+; CHECK-NEXT:    vmov.f32 s29, s8
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #128] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s24, s2
-; CHECK-NEXT:    vstrw.32 q3, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s30, s8
 ; CHECK-NEXT:    vmov.f32 s27, s3
-; CHECK-NEXT:    vmov.f32 s14, s0
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s12, s4
-; CHECK-NEXT:    vmov.f32 s15, s5
-; CHECK-NEXT:    vmov.f32 s13, s8
-; CHECK-NEXT:    vstrw.32 q3, [sp, #64] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s25, s7
-; CHECK-NEXT:    vmov.f32 s6, s0
-; CHECK-NEXT:    vmov.f32 s13, s1
-; CHECK-NEXT:    vmov.f32 s0, s2
-; CHECK-NEXT:    vmov.f32 s4, s16
-; CHECK-NEXT:    vmov.f32 s5, s28
-; CHECK-NEXT:    vmov.f32 s7, s17
-; CHECK-NEXT:    vmov.f32 s1, s19
-; CHECK-NEXT:    vstrw.32 q1, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s2, s31
-; CHECK-NEXT:    vldrw.u32 q1, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s26, s11
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s15, s30
-; CHECK-NEXT:    vstrw.32 q6, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s17, s1
-; CHECK-NEXT:    vldrw.u32 q6, [sp, #80] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s30, s0
-; CHECK-NEXT:    vmov.f32 s0, s2
-; CHECK-NEXT:    vmov.f32 s1, s11
-; CHECK-NEXT:    vmov.f32 s2, s7
-; CHECK-NEXT:    vmov.f32 s14, s18
-; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s18, s10
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s28, s8
-; CHECK-NEXT:    vmov.f32 s31, s9
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #80] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s12, s29
-; CHECK-NEXT:    vmov.f32 s29, s4
-; CHECK-NEXT:    vstrw.32 q3, [r1, #160]
-; CHECK-NEXT:    vmov.f32 s16, s5
 ; CHECK-NEXT:    vstrw.32 q7, [r1, #96]
-; CHECK-NEXT:    vmov.f32 s19, s6
-; CHECK-NEXT:    vmov.f32 s4, s8
-; CHECK-NEXT:    vstrw.32 q4, [r1, #112]
-; CHECK-NEXT:    vmov.f32 s6, s20
-; CHECK-NEXT:    vmov.f32 s20, s22
-; CHECK-NEXT:    vmov.f32 s5, s0
-; CHECK-NEXT:    vmov.f32 s8, s1
-; CHECK-NEXT:    vmov.f32 s11, s2
-; CHECK-NEXT:    vmov.f32 s22, s3
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s7, s9
-; CHECK-NEXT:    vstrw.32 q0, [r1, #128]
-; CHECK-NEXT:    vldrw.u32 q0, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s9, s21
-; CHECK-NEXT:    vstrw.32 q1, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s8, s0
+; CHECK-NEXT:    vmov.f32 s11, s1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s22, s6
+; CHECK-NEXT:    vmov.f64 d8, d0
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q5, [r1, #112]
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #144]
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q0, [r1, #160]
 ; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s21, s27
-; CHECK-NEXT:    vstrw.32 q2, [r1, #64]
+; CHECK-NEXT:    vmov.f32 s13, s7
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #80] @ 16-byte Reload
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #176]
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #64] @ 16-byte Reload
-; CHECK-NEXT:    vstrw.32 q5, [r1, #80]
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s25, s19
+; CHECK-NEXT:    vstrw.32 q3, [r1, #128]
+; CHECK-NEXT:    vmov.f32 s26, s7
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    add sp, #128
+; CHECK-NEXT:    vmov.f32 s10, s16
+; CHECK-NEXT:    vstrw.32 q6, [r1, #80]
+; CHECK-NEXT:    vmov.f32 s9, s4
+; CHECK-NEXT:    vmov.f32 s16, s5
+; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s19, s6
+; CHECK-NEXT:    vstrw.32 q4, [r1, #64]
+; CHECK-NEXT:    add sp, #144
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
@@ -1285,55 +1325,50 @@ define void @vst3_v8f16(<8 x half> *%src, <24 x half> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
 ; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q3, [r0]
-; CHECK-NEXT:    vmovx.f16 s0, s18
-; CHECK-NEXT:    vmov.f32 s4, s15
-; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vins.f16 s4, s19
-; CHECK-NEXT:    vmov.16 q0[0], r2
-; CHECK-NEXT:    vmovx.f16 s10, s16
-; CHECK-NEXT:    vmov.f32 s1, s4
-; CHECK-NEXT:    vmovx.f16 s4, s19
-; CHECK-NEXT:    vmov r2, s4
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vmov.16 q0[6], r2
-; CHECK-NEXT:    vmov r0, s10
-; CHECK-NEXT:    vmovx.f16 s8, s6
-; CHECK-NEXT:    vmovx.f16 s2, s15
-; CHECK-NEXT:    vins.f16 s0, s8
-; CHECK-NEXT:    vmovx.f16 s8, s7
-; CHECK-NEXT:    vins.f16 s3, s8
-; CHECK-NEXT:    vmov.f32 s8, s12
-; CHECK-NEXT:    vins.f16 s8, s16
-; CHECK-NEXT:    vins.f16 s7, s2
-; CHECK-NEXT:    vmov.f32 s2, s13
-; CHECK-NEXT:    vmov.16 q2[4], r0
-; CHECK-NEXT:    vins.f16 s2, s17
-; CHECK-NEXT:    vmov.f32 s11, s2
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-NEXT:    vmov.f32 s0, s4
 ; CHECK-NEXT:    vmovx.f16 s2, s12
-; CHECK-NEXT:    vmovx.f16 s12, s4
-; CHECK-NEXT:    vins.f16 s4, s2
-; CHECK-NEXT:    vins.f16 s10, s12
-; CHECK-NEXT:    vmovx.f16 s12, s17
-; CHECK-NEXT:    vmov.f32 s2, s14
-; CHECK-NEXT:    vmov r0, s12
-; CHECK-NEXT:    vins.f16 s2, s18
-; CHECK-NEXT:    vmov.16 q4[2], r0
-; CHECK-NEXT:    vmovx.f16 s12, s5
-; CHECK-NEXT:    vmov.f32 s18, s2
-; CHECK-NEXT:    vmovx.f16 s2, s13
-; CHECK-NEXT:    vins.f16 s5, s2
-; CHECK-NEXT:    vmovx.f16 s2, s14
-; CHECK-NEXT:    vins.f16 s6, s2
-; CHECK-NEXT:    vmov.f32 s2, s7
-; CHECK-NEXT:    vmov.f32 s9, s4
-; CHECK-NEXT:    vins.f16 s17, s12
-; CHECK-NEXT:    vmov.f32 s16, s5
-; CHECK-NEXT:    vstrw.32 q0, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s19, s6
-; CHECK-NEXT:    vstrw.32 q2, [r1]
-; CHECK-NEXT:    vstrw.32 q4, [r1, #16]
+; CHECK-NEXT:    vins.f16 s0, s12
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov.16 q0[4], r2
+; CHECK-NEXT:    vmovx.f16 s4, s4
+; CHECK-NEXT:    vmov.f32 s1, s16
+; CHECK-NEXT:    vmovx.f16 s11, s15
+; CHECK-NEXT:    vmov.f32 s8, s5
+; CHECK-NEXT:    vins.f16 s1, s4
+; CHECK-NEXT:    vmovx.f16 s4, s16
+; CHECK-NEXT:    vins.f16 s8, s13
+; CHECK-NEXT:    vins.f16 s2, s4
+; CHECK-NEXT:    vmovx.f16 s4, s19
+; CHECK-NEXT:    vmov.f32 s3, s8
+; CHECK-NEXT:    vins.f16 s11, s4
+; CHECK-NEXT:    vmovx.f16 s4, s18
+; CHECK-NEXT:    vmovx.f16 s8, s14
+; CHECK-NEXT:    vins.f16 s9, s15
+; CHECK-NEXT:    vins.f16 s8, s4
+; CHECK-NEXT:    vmovx.f16 s4, s9
+; CHECK-NEXT:    vmov.f32 s9, s7
+; CHECK-NEXT:    vins.f16 s9, s4
+; CHECK-NEXT:    vrev32.16 q3, q3
+; CHECK-NEXT:    vmov.f32 s10, s19
+; CHECK-NEXT:    vmovx.f16 s4, s7
+; CHECK-NEXT:    vins.f16 s10, s4
+; CHECK-NEXT:    vmovx.f16 s4, s5
+; CHECK-NEXT:    vmov.f32 s12, s17
+; CHECK-NEXT:    vstrw.32 q2, [r1, #32]
+; CHECK-NEXT:    vins.f16 s12, s4
+; CHECK-NEXT:    vmovx.f16 s4, s6
+; CHECK-NEXT:    vins.f16 s18, s4
+; CHECK-NEXT:    vmovx.f16 s4, s17
+; CHECK-NEXT:    vins.f16 s13, s4
+; CHECK-NEXT:    vmovx.f16 s4, s14
+; CHECK-NEXT:    vins.f16 s6, s4
+; CHECK-NEXT:    vmov.f32 s15, s18
+; CHECK-NEXT:    vmov.f32 s14, s6
+; CHECK-NEXT:    vstrw.32 q0, [r1]
+; CHECK-NEXT:    vstrw.32 q3, [r1, #16]
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
@@ -1355,128 +1390,121 @@ define void @vst3_v16f16(<16 x half> *%src, <48 x half> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    .pad #48
-; CHECK-NEXT:    sub sp, #48
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q6, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s8, s12
-; CHECK-NEXT:    vmovx.f16 s2, s4
-; CHECK-NEXT:    vmov.f32 s0, s13
-; CHECK-NEXT:    vins.f16 s8, s4
-; CHECK-NEXT:    vmov r2, s2
-; CHECK-NEXT:    vins.f16 s0, s5
-; CHECK-NEXT:    vmov.16 q2[4], r2
-; CHECK-NEXT:    vmov q4, q3
-; CHECK-NEXT:    vmov.f32 s11, s0
-; CHECK-NEXT:    vmovx.f16 s0, s16
-; CHECK-NEXT:    vmov.f32 s12, s8
-; CHECK-NEXT:    vmov.f64 d11, d9
-; CHECK-NEXT:    vmov.f32 s21, s17
-; CHECK-NEXT:    vmov.f64 d7, d5
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #80]
-; CHECK-NEXT:    vmovx.f16 s2, s8
-; CHECK-NEXT:    vins.f16 s8, s0
-; CHECK-NEXT:    vins.f16 s14, s2
-; CHECK-NEXT:    vmovx.f16 s2, s24
-; CHECK-NEXT:    vstrw.32 q3, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q3, [r0]
+; CHECK-NEXT:    .pad #96
+; CHECK-NEXT:    sub sp, #96
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #64]
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #48]
+; CHECK-NEXT:    vmovx.f16 s0, s15
+; CHECK-NEXT:    vmovx.f16 s7, s11
+; CHECK-NEXT:    vins.f16 s7, s0
+; CHECK-NEXT:    vmov q6, q2
+; CHECK-NEXT:    vmovx.f16 s0, s14
+; CHECK-NEXT:    vmovx.f16 s4, s10
+; CHECK-NEXT:    vins.f16 s1, s11
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vins.f16 s4, s0
+; CHECK-NEXT:    vmovx.f16 s0, s1
+; CHECK-NEXT:    vstrw.32 q1, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s5, s11
+; CHECK-NEXT:    vins.f16 s5, s0
+; CHECK-NEXT:    vmov.f32 s6, s15
+; CHECK-NEXT:    vmovx.f16 s0, s11
+; CHECK-NEXT:    vmov q7, q4
+; CHECK-NEXT:    vins.f16 s6, s0
+; CHECK-NEXT:    vmovx.f16 s2, s20
+; CHECK-NEXT:    vstrw.32 q1, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s4, s16
+; CHECK-NEXT:    vins.f16 s4, s20
+; CHECK-NEXT:    vmov.f32 s0, s17
 ; CHECK-NEXT:    vmov r2, s2
-; CHECK-NEXT:    vmov.f32 s16, s12
-; CHECK-NEXT:    vins.f16 s16, s24
-; CHECK-NEXT:    vmov.f32 s0, s13
+; CHECK-NEXT:    vstrw.32 q7, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s16, s4
+; CHECK-NEXT:    vmovx.f16 s4, s28
+; CHECK-NEXT:    vldrw.u32 q7, [r0, #80]
 ; CHECK-NEXT:    vmov.16 q4[4], r2
-; CHECK-NEXT:    vins.f16 s0, s25
+; CHECK-NEXT:    vins.f16 s0, s21
+; CHECK-NEXT:    vstrw.32 q5, [sp] @ 16-byte Spill
 ; CHECK-NEXT:    vmov.f32 s19, s0
+; CHECK-NEXT:    vmovx.f16 s0, s28
+; CHECK-NEXT:    vins.f16 s18, s0
+; CHECK-NEXT:    vmov.f64 d0, d4
+; CHECK-NEXT:    vstrw.32 q6, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s20, s8
+; CHECK-NEXT:    vmovx.f16 s8, s24
+; CHECK-NEXT:    vmov.f32 s22, s28
+; CHECK-NEXT:    vins.f16 s20, s24
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.f32 s17, s28
+; CHECK-NEXT:    vmov.16 q5[4], r0
+; CHECK-NEXT:    vmov.f32 s2, s10
+; CHECK-NEXT:    vins.f16 s17, s4
+; CHECK-NEXT:    vmov.f32 s4, s9
+; CHECK-NEXT:    vldrw.u32 q2, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmovx.f16 s0, s0
+; CHECK-NEXT:    vmov.f32 s21, s12
+; CHECK-NEXT:    vmovx.f16 s24, s10
+; CHECK-NEXT:    vins.f16 s21, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s12
-; CHECK-NEXT:    vmov.f64 d15, d13
-; CHECK-NEXT:    vmov.f32 s17, s13
-; CHECK-NEXT:    vmov.f32 s24, s16
-; CHECK-NEXT:    vmov.f64 d13, d9
-; CHECK-NEXT:    vmov.f64 d9, d7
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #64]
-; CHECK-NEXT:    vmovx.f16 s2, s12
-; CHECK-NEXT:    vins.f16 s12, s0
-; CHECK-NEXT:    vins.f16 s26, s2
-; CHECK-NEXT:    vmovx.f16 s2, s30
-; CHECK-NEXT:    vmov.f32 s0, s19
-; CHECK-NEXT:    vstrw.32 q6, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vmov r0, s2
-; CHECK-NEXT:    vins.f16 s0, s31
-; CHECK-NEXT:    vmov.f32 s29, s25
-; CHECK-NEXT:    vmov.16 q6[0], r0
-; CHECK-NEXT:    vmov.f32 s25, s0
-; CHECK-NEXT:    vmovx.f16 s0, s31
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    vmovx.f16 s0, s14
-; CHECK-NEXT:    vmov.16 q6[6], r0
-; CHECK-NEXT:    vmovx.f16 s2, s15
+; CHECK-NEXT:    vins.f16 s22, s0
+; CHECK-NEXT:    vmovx.f16 s0, s30
 ; CHECK-NEXT:    vins.f16 s24, s0
-; CHECK-NEXT:    vmovx.f16 s0, s19
-; CHECK-NEXT:    vins.f16 s15, s0
-; CHECK-NEXT:    vmovx.f16 s0, s6
-; CHECK-NEXT:    vmov.f32 s4, s23
-; CHECK-NEXT:    vins.f16 s27, s2
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    vins.f16 s4, s7
-; CHECK-NEXT:    vmov.16 q0[0], r0
-; CHECK-NEXT:    vstrw.32 q7, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s1, s4
-; CHECK-NEXT:    vmovx.f16 s4, s7
-; CHECK-NEXT:    vmov r0, s4
-; CHECK-NEXT:    vmovx.f16 s4, s10
-; CHECK-NEXT:    vmov.16 q0[6], r0
-; CHECK-NEXT:    vldrw.u32 q7, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmovx.f16 s0, s31
+; CHECK-NEXT:    vmovx.f16 s27, s11
+; CHECK-NEXT:    vins.f16 s4, s25
+; CHECK-NEXT:    vins.f16 s27, s0
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vins.f16 s25, s11
+; CHECK-NEXT:    vmov.f32 s23, s4
+; CHECK-NEXT:    vmovx.f16 s4, s25
+; CHECK-NEXT:    vmov.f32 s25, s3
+; CHECK-NEXT:    vmov.f32 s26, s31
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    vins.f16 s25, s4
+; CHECK-NEXT:    vins.f16 s26, s0
+; CHECK-NEXT:    vmovx.f16 s4, s1
+; CHECK-NEXT:    vmov.f32 s0, s29
 ; CHECK-NEXT:    vins.f16 s0, s4
-; CHECK-NEXT:    vmovx.f16 s4, s11
-; CHECK-NEXT:    vmovx.f16 s2, s23
-; CHECK-NEXT:    vins.f16 s3, s4
-; CHECK-NEXT:    vmovx.f16 s4, s5
-; CHECK-NEXT:    vins.f16 s11, s2
-; CHECK-NEXT:    vmov.f32 s2, s22
-; CHECK-NEXT:    vmov r0, s4
-; CHECK-NEXT:    vins.f16 s2, s6
-; CHECK-NEXT:    vmov.16 q1[2], r0
-; CHECK-NEXT:    vmov.f32 s29, s12
-; CHECK-NEXT:    vmovx.f16 s4, s21
-; CHECK-NEXT:    vmovx.f16 s12, s9
+; CHECK-NEXT:    vmovx.f16 s4, s2
+; CHECK-NEXT:    vins.f16 s30, s4
+; CHECK-NEXT:    vmov.f32 s6, s18
+; CHECK-NEXT:    vrev32.16 q2, q2
+; CHECK-NEXT:    vmovx.f16 s4, s29
+; CHECK-NEXT:    vmov.f32 s3, s30
+; CHECK-NEXT:    vldrw.u32 q7, [sp, #32] @ 16-byte Reload
 ; CHECK-NEXT:    vins.f16 s9, s4
-; CHECK-NEXT:    vmovx.f16 s4, s22
-; CHECK-NEXT:    vins.f16 s10, s4
-; CHECK-NEXT:    vmov.f32 s21, s17
-; CHECK-NEXT:    vmov.f32 s22, s18
-; CHECK-NEXT:    vins.f16 s5, s12
-; CHECK-NEXT:    vmov.f32 s4, s18
-; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vstrw.32 q7, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s6, s2
-; CHECK-NEXT:    vmovx.f16 s12, s17
-; CHECK-NEXT:    vins.f16 s4, s18
-; CHECK-NEXT:    vmov r0, s12
-; CHECK-NEXT:    vmovx.f16 s12, s13
-; CHECK-NEXT:    vmov.16 q7[2], r0
-; CHECK-NEXT:    vmov.f32 s2, s11
-; CHECK-NEXT:    vmov.f32 s30, s4
-; CHECK-NEXT:    vmovx.f16 s4, s21
-; CHECK-NEXT:    vins.f16 s13, s4
-; CHECK-NEXT:    vmovx.f16 s4, s22
+; CHECK-NEXT:    vmovx.f16 s4, s10
+; CHECK-NEXT:    vins.f16 s2, s4
+; CHECK-NEXT:    vmovx.f16 s4, s29
+; CHECK-NEXT:    vmov.f32 s8, s13
+; CHECK-NEXT:    vstrw.32 q6, [r1, #80]
+; CHECK-NEXT:    vins.f16 s8, s4
+; CHECK-NEXT:    vmovx.f16 s4, s30
 ; CHECK-NEXT:    vins.f16 s14, s4
-; CHECK-NEXT:    vldrw.u32 q5, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vstrw.32 q0, [r1, #80]
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s26, s15
-; CHECK-NEXT:    vins.f16 s29, s12
-; CHECK-NEXT:    vmov.f32 s21, s8
-; CHECK-NEXT:    vstrw.32 q6, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s4, s9
-; CHECK-NEXT:    vstrw.32 q5, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s7, s10
-; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vmov.f32 s28, s13
-; CHECK-NEXT:    vstrw.32 q1, [r1, #64]
-; CHECK-NEXT:    vmov.f32 s31, s14
-; CHECK-NEXT:    vstrw.32 q7, [r1, #16]
-; CHECK-NEXT:    add sp, #48
+; CHECK-NEXT:    vmov.f32 s10, s30
+; CHECK-NEXT:    vmov.f32 s11, s14
+; CHECK-NEXT:    vmovx.f16 s4, s13
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q7, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s18, s6
+; CHECK-NEXT:    vstrw.32 q5, [r1]
+; CHECK-NEXT:    vrev32.16 q3, q3
+; CHECK-NEXT:    vmov.f32 s6, s30
+; CHECK-NEXT:    vldrw.u32 q7, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vins.f16 s13, s4
+; CHECK-NEXT:    vmovx.f16 s4, s14
+; CHECK-NEXT:    vmov.f32 s1, s9
+; CHECK-NEXT:    vins.f16 s10, s4
+; CHECK-NEXT:    vmov.f32 s9, s13
+; CHECK-NEXT:    vmov.f32 s4, s28
+; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s7, s31
+; CHECK-NEXT:    vstrw.32 q4, [r1, #48]
+; CHECK-NEXT:    vstrw.32 q1, [r1, #32]
+; CHECK-NEXT:    vstrw.32 q0, [r1, #64]
+; CHECK-NEXT:    add sp, #96
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vst4.ll b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
index b094cfe6b5679..b76a97d0246bb 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
@@ -757,42 +757,53 @@ define void @vst4_v4i64(<4 x i64> *%src, <16 x i64> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    .pad #64
 ; CHECK-NEXT:    sub sp, #64
-; CHECK-NEXT:    vldrw.u32 q7, [r0, #80]
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q6, [r0]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #96]
-; CHECK-NEXT:    vstrw.32 q7, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f64 d15, d10
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q7, [r0]
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #96]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #64]
+; CHECK-NEXT:    vmov.f32 s6, s0
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #112]
+; CHECK-NEXT:    vmov.f32 s7, s1
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #48]
+; CHECK-NEXT:    vmov.f64 d13, d1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #112]
 ; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f64 d14, d12
-; CHECK-NEXT:    vstrw.32 q7, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f64 d14, d4
-; CHECK-NEXT:    vmov.f64 d15, d2
-; CHECK-NEXT:    vstrw.32 q7, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f64 d4, d0
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q0, [r0, #80]
+; CHECK-NEXT:    vmov.f32 s4, s28
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s5, s29
+; CHECK-NEXT:    vmov.f32 s24, s30
+; CHECK-NEXT:    vstrw.32 q1, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s25, s31
 ; CHECK-NEXT:    vldrw.u32 q7, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f64 d10, d13
-; CHECK-NEXT:    vmov.f64 d2, d5
-; CHECK-NEXT:    vstrw.32 q5, [r1, #32]
-; CHECK-NEXT:    vmov.f64 d5, d6
-; CHECK-NEXT:    vstrw.32 q1, [r1, #48]
-; CHECK-NEXT:    vmov.f64 d13, d8
-; CHECK-NEXT:    vstrw.32 q2, [r1, #64]
-; CHECK-NEXT:    vmov.f64 d12, d0
-; CHECK-NEXT:    vmov.f64 d8, d1
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
-; CHECK-NEXT:    vstrw.32 q6, [r1, #80]
-; CHECK-NEXT:    vstrw.32 q0, [r1]
+; CHECK-NEXT:    vmov.f32 s6, s8
+; CHECK-NEXT:    vstrw.32 q6, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s7, s9
+; CHECK-NEXT:    vmov.f32 s4, s12
+; CHECK-NEXT:    vmov.f32 s5, s13
+; CHECK-NEXT:    vmov.f32 s8, s14
+; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s9, s15
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f64 d1, d15
+; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-NEXT:    vmov.f64 d13, d7
+; CHECK-NEXT:    vmov.f32 s14, s20
+; CHECK-NEXT:    vmov.f32 s15, s21
+; CHECK-NEXT:    vmov.f32 s30, s16
+; CHECK-NEXT:    vstrw.32 q3, [r1, #80]
+; CHECK-NEXT:    vmov.f32 s31, s17
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s16, s2
+; CHECK-NEXT:    vstrw.32 q7, [r1, #64]
+; CHECK-NEXT:    vmov.f32 s17, s3
 ; CHECK-NEXT:    vldrw.u32 q0, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f64 d6, d15
-; CHECK-NEXT:    vstrw.32 q4, [r1, #112]
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vstrw.32 q3, [r1, #96]
+; CHECK-NEXT:    vmov.f32 s20, s26
+; CHECK-NEXT:    vstrw.32 q4, [r1, #96]
+; CHECK-NEXT:    vmov.f32 s21, s27
+; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-NEXT:    vstrw.32 q5, [r1, #112]
+; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    add sp, #64
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr

diff  --git a/llvm/test/CodeGen/X86/haddsub-4.ll b/llvm/test/CodeGen/X86/haddsub-4.ll
index 2250a9c78573b..3784400e30862 100644
--- a/llvm/test/CodeGen/X86/haddsub-4.ll
+++ b/llvm/test/CodeGen/X86/haddsub-4.ll
@@ -85,10 +85,12 @@ define <8 x float> @hadd_reverse2_v8f32(<8 x float> %a0, <8 x float> %a1) {
 ; SSE-LABEL: hadd_reverse2_v8f32:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm4
-; SSE-NEXT:    haddps %xmm3, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,0,3,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,2],xmm0[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,2,1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,2,1,0]
 ; SSE-NEXT:    haddps %xmm2, %xmm4
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,0,3,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,2,1,0]
+; SSE-NEXT:    haddps %xmm3, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    movaps %xmm4, %xmm1
 ; SSE-NEXT:    retq
@@ -276,10 +278,18 @@ define <8 x double> @hadd_reverse2_v8f64(<8 x double> %a0, <8 x double> %a1) nou
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd %xmm1, %xmm8
 ; SSE-NEXT:    movapd %xmm0, %xmm9
-; SSE-NEXT:    haddpd %xmm7, %xmm3
-; SSE-NEXT:    haddpd %xmm6, %xmm2
-; SSE-NEXT:    haddpd %xmm5, %xmm8
+; SSE-NEXT:    shufpd {{.*#+}} xmm9 = xmm9[1],xmm0[0]
+; SSE-NEXT:    shufpd {{.*#+}} xmm8 = xmm8[1],xmm1[0]
+; SSE-NEXT:    shufpd {{.*#+}} xmm2 = xmm2[1,0]
+; SSE-NEXT:    shufpd {{.*#+}} xmm3 = xmm3[1,0]
+; SSE-NEXT:    shufpd {{.*#+}} xmm4 = xmm4[1,0]
 ; SSE-NEXT:    haddpd %xmm4, %xmm9
+; SSE-NEXT:    shufpd {{.*#+}} xmm5 = xmm5[1,0]
+; SSE-NEXT:    haddpd %xmm5, %xmm8
+; SSE-NEXT:    shufpd {{.*#+}} xmm6 = xmm6[1,0]
+; SSE-NEXT:    haddpd %xmm6, %xmm2
+; SSE-NEXT:    shufpd {{.*#+}} xmm7 = xmm7[1,0]
+; SSE-NEXT:    haddpd %xmm7, %xmm3
 ; SSE-NEXT:    movapd %xmm3, %xmm0
 ; SSE-NEXT:    movapd %xmm2, %xmm1
 ; SSE-NEXT:    movapd %xmm8, %xmm2
@@ -288,20 +298,26 @@ define <8 x double> @hadd_reverse2_v8f64(<8 x double> %a0, <8 x double> %a1) nou
 ;
 ; AVX1-LABEL: hadd_reverse2_v8f64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vhaddpd %ymm3, %ymm1, %ymm1
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX1-NEXT:    vhaddpd %ymm2, %ymm0, %ymm0
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT:    vmovapd %ymm3, %ymm0
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = ymm1[1,0,3,2]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm1 = ymm1[1,0,3,2]
+; AVX1-NEXT:    vhaddpd %ymm1, %ymm0, %ymm1
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX1-NEXT:    vhaddpd %ymm0, %ymm4, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: hadd_reverse2_v8f64:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vhaddpd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX2-NEXT:    vhaddpd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT:    vmovapd %ymm3, %ymm0
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,2,1,0]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm1[3,2,1,0]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm2[3,2,1,0]
+; AVX2-NEXT:    vhaddpd %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm3[3,2,1,0]
+; AVX2-NEXT:    vhaddpd %ymm0, %ymm4, %ymm0
 ; AVX2-NEXT:    retq
   %shuf0 = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   %shuf1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -314,19 +330,19 @@ define <8 x double> @hadd_reverse2_v8f64(<8 x double> %a0, <8 x double> %a1) nou
 define <16 x float> @hadd_reverse_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
 ; SSE-LABEL: hadd_reverse_v16f32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movaps %xmm4, %xmm8
-; SSE-NEXT:    movaps %xmm0, %xmm4
-; SSE-NEXT:    haddps %xmm3, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,2,1,0]
-; SSE-NEXT:    haddps %xmm7, %xmm6
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,2,1,0]
-; SSE-NEXT:    haddps %xmm1, %xmm4
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,2,1,0]
-; SSE-NEXT:    haddps %xmm5, %xmm8
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[3,2,1,0]
-; SSE-NEXT:    movaps %xmm2, %xmm0
-; SSE-NEXT:    movaps %xmm6, %xmm1
-; SSE-NEXT:    movaps %xmm4, %xmm2
+; SSE-NEXT:    movaps %xmm5, %xmm8
+; SSE-NEXT:    movaps %xmm1, %xmm5
+; SSE-NEXT:    haddps %xmm2, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0,3,2]
+; SSE-NEXT:    haddps %xmm6, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,0,3,2]
+; SSE-NEXT:    haddps %xmm0, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,0,3,2]
+; SSE-NEXT:    haddps %xmm4, %xmm8
+; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,0,3,2]
+; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    movaps %xmm7, %xmm1
+; SSE-NEXT:    movaps %xmm5, %xmm2
 ; SSE-NEXT:    movaps %xmm8, %xmm3
 ; SSE-NEXT:    retq
 ;
@@ -363,14 +379,18 @@ define <16 x float> @hadd_reverse2_v16f32(<16 x float> %a0, <16 x float> %a1) no
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm8
 ; SSE-NEXT:    movaps %xmm0, %xmm9
-; SSE-NEXT:    haddps %xmm7, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0,3,2]
-; SSE-NEXT:    haddps %xmm6, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,0,3,2]
-; SSE-NEXT:    haddps %xmm5, %xmm8
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,0,3,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[3,2],xmm0[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[3,2],xmm1[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,2,1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,2,1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,2,1,0]
 ; SSE-NEXT:    haddps %xmm4, %xmm9
-; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[1,0,3,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[3,2,1,0]
+; SSE-NEXT:    haddps %xmm5, %xmm8
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,2,1,0]
+; SSE-NEXT:    haddps %xmm6, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,2,1,0]
+; SSE-NEXT:    haddps %xmm7, %xmm3
 ; SSE-NEXT:    movaps %xmm3, %xmm0
 ; SSE-NEXT:    movaps %xmm2, %xmm1
 ; SSE-NEXT:    movaps %xmm8, %xmm2
@@ -379,24 +399,30 @@ define <16 x float> @hadd_reverse2_v16f32(<16 x float> %a0, <16 x float> %a1) no
 ;
 ; AVX1-LABEL: hadd_reverse2_v16f32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vhaddps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm3 = ymm1[1,0,3,2,5,4,7,6]
-; AVX1-NEXT:    vhaddps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm0[1,0,3,2,5,4,7,6]
-; AVX1-NEXT:    vmovaps %ymm3, %ymm0
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm4 = ymm1[3,2,1,0,7,6,5,4]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
+; AVX1-NEXT:    vhaddps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX1-NEXT:    vhaddps %ymm0, %ymm4, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: hadd_reverse2_v16f32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vhaddps %ymm3, %ymm1, %ymm1
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[1,0,3,2,5,4,7,6]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX2-NEXT:    vhaddps %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT:    vmovaps %ymm3, %ymm0
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm1[2,3,0,1]
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = ymm2[3,2,1,0,7,6,5,4]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,3,0,1]
+; AVX2-NEXT:    vhaddps %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm3[3,2,1,0,7,6,5,4]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX2-NEXT:    vhaddps %ymm0, %ymm4, %ymm0
 ; AVX2-NEXT:    retq
   %shuf0 = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   %shuf1 = shufflevector <16 x float> %a1, <16 x float> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>

diff  --git a/llvm/test/CodeGen/X86/insertelement-duplicates.ll b/llvm/test/CodeGen/X86/insertelement-duplicates.ll
index 47777c5bb61c8..3f693728e6fb0 100644
--- a/llvm/test/CodeGen/X86/insertelement-duplicates.ll
+++ b/llvm/test/CodeGen/X86/insertelement-duplicates.ll
@@ -11,7 +11,7 @@ define void @PR15298(<4 x float>* nocapture %source, <8 x float>* nocapture %des
 ; SSE-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; SSE-32-NEXT:    xorps %xmm0, %xmm0
 ; SSE-32-NEXT:    xorps %xmm1, %xmm1
-; SSE-32-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],mem[0,0]
+; SSE-32-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,0]
 ; SSE-32-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
 ; SSE-32-NEXT:    movups %xmm0, 624(%eax)
 ; SSE-32-NEXT:    movups %xmm1, 608(%eax)
@@ -21,7 +21,7 @@ define void @PR15298(<4 x float>* nocapture %source, <8 x float>* nocapture %des
 ; SSE-64:       # %bb.0: # %L.entry
 ; SSE-64-NEXT:    xorps %xmm0, %xmm0
 ; SSE-64-NEXT:    xorps %xmm1, %xmm1
-; SSE-64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],mem[0,0]
+; SSE-64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,0]
 ; SSE-64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
 ; SSE-64-NEXT:    movups %xmm0, 624(%rsi)
 ; SSE-64-NEXT:    movups %xmm1, 608(%rsi)

diff  --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index 34f6d9ffb6799..c41fde4fa3238 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -519,14 +519,15 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
 ; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2]
 ; SSE2-NEXT:    movaps %xmm0, %xmm3
 ; SSE2-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,2],xmm2[2,3]
-; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
-; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[0,2]
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE2-NEXT:    movaps %xmm2, 16(%rdi)
+; SSE2-NEXT:    movaps %xmm2, %xmm5
+; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,0],xmm1[1,0]
+; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[0,2]
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3]
+; SSE2-NEXT:    unpckhps {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[0,2]
+; SSE2-NEXT:    movaps %xmm2, 32(%rdi)
+; SSE2-NEXT:    movaps %xmm5, 16(%rdi)
 ; SSE2-NEXT:    movaps %xmm4, (%rdi)
-; SSE2-NEXT:    movaps %xmm0, 32(%rdi)
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: v12i32:
@@ -537,12 +538,14 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
 ; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,1,0,1]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5],xmm4[6,7]
 ; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[1,1,2,2]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5],xmm4[6,7]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7]
-; SSE42-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
-; SSE42-NEXT:    movdqa %xmm1, 32(%rdi)
+; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5],xmm4[6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5,6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
+; SSE42-NEXT:    movdqa %xmm0, 32(%rdi)
 ; SSE42-NEXT:    movdqa %xmm4, 16(%rdi)
 ; SSE42-NEXT:    movdqa %xmm3, (%rdi)
 ; SSE42-NEXT:    retq
@@ -1208,42 +1211,42 @@ define void @interleave_24i16_out_reverse(<24 x i16>* %p, <8 x i16>* %q1, <8 x i
 define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
 ; SSE2-LABEL: interleave_24i16_in:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqu (%rsi), %xmm0
+; SSE2-NEXT:    movdqu (%rsi), %xmm3
 ; SSE2-NEXT:    movdqu (%rdx), %xmm2
-; SSE2-NEXT:    movdqu (%rcx), %xmm3
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,0,0,0]
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE2-NEXT:    movdqa %xmm4, %xmm5
-; SSE2-NEXT:    pandn %xmm1, %xmm5
-; SSE2-NEXT:    movdqa %xmm0, %xmm1
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
-; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,5]
-; SSE2-NEXT:    pand %xmm4, %xmm1
-; SSE2-NEXT:    por %xmm5, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,2,2]
-; SSE2-NEXT:    pand %xmm4, %xmm5
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm6 = xmm2[3,3,3,3,4,5,6,7]
-; SSE2-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; SSE2-NEXT:    pandn %xmm6, %xmm4
-; SSE2-NEXT:    por %xmm5, %xmm4
-; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,0,65535]
-; SSE2-NEXT:    pand %xmm5, %xmm4
+; SSE2-NEXT:    movdqu (%rcx), %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,0,0]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pandn %xmm4, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[3,3,3,3]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5]
+; SSE2-NEXT:    pand %xmm0, %xmm3
+; SSE2-NEXT:    por %xmm5, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pandn %xmm4, %xmm5
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[3,3,3,3,4,5,6,7]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[2,3,2,0,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm5, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
+; SSE2-NEXT:    pand %xmm5, %xmm1
 ; SSE2-NEXT:    pandn %xmm6, %xmm5
-; SSE2-NEXT:    por %xmm4, %xmm5
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,1,3,3]
-; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,1,1,0,4,5,6,7]
-; SSE2-NEXT:    pand %xmm4, %xmm0
-; SSE2-NEXT:    pandn %xmm3, %xmm4
-; SSE2-NEXT:    por %xmm0, %xmm4
-; SSE2-NEXT:    movdqu %xmm4, 32(%rdi)
-; SSE2-NEXT:    movdqu %xmm5, 16(%rdi)
-; SSE2-NEXT:    movdqu %xmm1, (%rdi)
+; SSE2-NEXT:    por %xmm1, %xmm5
+; SSE2-NEXT:    pand %xmm0, %xmm5
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,4,4,4]
+; SSE2-NEXT:    pandn %xmm1, %xmm0
+; SSE2-NEXT:    por %xmm5, %xmm0
+; SSE2-NEXT:    movdqu %xmm0, 16(%rdi)
+; SSE2-NEXT:    movdqu %xmm2, 32(%rdi)
+; SSE2-NEXT:    movdqu %xmm3, (%rdi)
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: interleave_24i16_in:
@@ -1252,23 +1255,22 @@ define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2,
 ; SSE42-NEXT:    movdqu (%rdx), %xmm1
 ; SSE42-NEXT:    movdqu (%rcx), %xmm2
 ; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
-; SSE42-NEXT:    pshuflw {{.*#+}} xmm4 = xmm1[3,3,3,3,4,5,6,7]
-; SSE42-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,2,2]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
-; SSE42-NEXT:    movdqa %xmm0, %xmm4
-; SSE42-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE42-NEXT:    pshufb {{.*#+}} xmm4 = xmm4[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
+; SSE42-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11]
 ; SSE42-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,0,0,0]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm5 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; SSE42-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4],xmm1[5,6],xmm0[7]
-; SSE42-NEXT:    movdqu %xmm0, 32(%rdi)
-; SSE42-NEXT:    movdqu %xmm5, (%rdi)
+; SSE42-NEXT:    pblendw {{.*#+}} xmm5 = xmm0[0,1],xmm5[2],xmm0[3,4],xmm5[5],xmm0[6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,2,2]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6],xmm3[7]
+; SSE42-NEXT:    pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
+; SSE42-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7]
+; SSE42-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[4,5,6,7,u,u,8,9,10,11,u,u,12,13,14,15]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7]
+; SSE42-NEXT:    movdqu %xmm4, 32(%rdi)
 ; SSE42-NEXT:    movdqu %xmm3, 16(%rdi)
+; SSE42-NEXT:    movdqu %xmm5, (%rdi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: interleave_24i16_in:
@@ -1404,7 +1406,7 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm10[1,1,1,1]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm7 = xmm7[0,1],xmm5[0,3]
-; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,0],xmm10[2,0]
+; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm10[2,3]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm4
 ; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,3],xmm5[2,0]
 ; SSE2-NEXT:    movaps %xmm8, %xmm5
@@ -1412,7 +1414,7 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm9[1,1,1,1]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm8[0,3]
-; SSE2-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,0],xmm9[2,0]
+; SSE2-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,1],xmm9[2,3]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm2
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm8[2,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,1],xmm9[3,3]
@@ -1631,109 +1633,114 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movups (%rsi), %xmm1
 ; SSE2-NEXT:    movups 16(%rsi), %xmm0
-; SSE2-NEXT:    movups (%rdx), %xmm2
+; SSE2-NEXT:    movups (%rdx), %xmm8
 ; SSE2-NEXT:    movups 16(%rdx), %xmm5
-; SSE2-NEXT:    movups (%rcx), %xmm8
-; SSE2-NEXT:    movups 16(%rcx), %xmm9
-; SSE2-NEXT:    movaps %xmm8, %xmm7
+; SSE2-NEXT:    movups (%rcx), %xmm3
+; SSE2-NEXT:    movups 16(%rcx), %xmm6
+; SSE2-NEXT:    movaps %xmm3, %xmm7
 ; SSE2-NEXT:    shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[1,3]
-; SSE2-NEXT:    movaps %xmm1, %xmm3
-; SSE2-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm7[0,2]
+; SSE2-NEXT:    movaps %xmm1, %xmm9
+; SSE2-NEXT:    unpcklps {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; SSE2-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,1],xmm7[0,2]
+; SSE2-NEXT:    movaps %xmm5, %xmm7
+; SSE2-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,3],xmm6[3,3]
+; SSE2-NEXT:    movaps %xmm6, %xmm4
+; SSE2-NEXT:    unpckhps {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,3],xmm7[0,2]
 ; SSE2-NEXT:    movaps %xmm0, %xmm7
 ; SSE2-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm5[1]
-; SSE2-NEXT:    movaps %xmm9, %xmm6
+; SSE2-NEXT:    movaps %xmm6, %xmm2
+; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,0],xmm5[1,0]
+; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm7[0,2]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[1,3]
-; SSE2-NEXT:    movaps %xmm0, %xmm4
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm5[3,3]
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,2],xmm9[2,3]
-; SSE2-NEXT:    shufps {{.*#+}} xmm9 = xmm9[1,1],xmm5[1,1]
-; SSE2-NEXT:    shufps {{.*#+}} xmm9 = xmm9[2,0],xmm7[0,2]
-; SSE2-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0,2]
-; SSE2-NEXT:    movaps %xmm1, %xmm5
-; SSE2-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm2[1]
-; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3]
-; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,2],xmm8[2,3]
-; SSE2-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,1],xmm2[1,1]
-; SSE2-NEXT:    shufps {{.*#+}} xmm8 = xmm8[2,0],xmm5[0,2]
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0,1,3]
-; SSE2-NEXT:    movups %xmm8, 16(%rdi)
-; SSE2-NEXT:    movups %xmm4, 48(%rdi)
-; SSE2-NEXT:    movups %xmm9, 64(%rdi)
-; SSE2-NEXT:    movups %xmm3, (%rdi)
-; SSE2-NEXT:    movups %xmm1, 32(%rdi)
-; SSE2-NEXT:    movups %xmm0, 80(%rdi)
+; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0,2]
+; SSE2-NEXT:    movaps %xmm8, %xmm5
+; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[3,3],xmm3[3,3]
+; SSE2-NEXT:    movaps %xmm3, %xmm6
+; SSE2-NEXT:    unpckhps {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
+; SSE2-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,3],xmm5[0,2]
+; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm8[1]
+; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0],xmm8[1,0]
+; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[0,2]
+; SSE2-NEXT:    movups %xmm3, 16(%rdi)
+; SSE2-NEXT:    movups %xmm6, 32(%rdi)
+; SSE2-NEXT:    movups %xmm0, 48(%rdi)
+; SSE2-NEXT:    movups %xmm2, 64(%rdi)
+; SSE2-NEXT:    movups %xmm4, 80(%rdi)
+; SSE2-NEXT:    movups %xmm9, (%rdi)
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: interleave_24i32_in:
 ; SSE42:       # %bb.0:
-; SSE42-NEXT:    movdqu (%rsi), %xmm0
+; SSE42-NEXT:    movdqu (%rsi), %xmm8
 ; SSE42-NEXT:    movdqu 16(%rsi), %xmm4
-; SSE42-NEXT:    movdqu (%rdx), %xmm9
+; SSE42-NEXT:    movdqu (%rdx), %xmm2
 ; SSE42-NEXT:    movdqu 16(%rdx), %xmm5
 ; SSE42-NEXT:    movdqu (%rcx), %xmm3
 ; SSE42-NEXT:    movdqu 16(%rcx), %xmm6
-; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm9[0,0,1,1]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[0,1,0,1]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,0,1,1]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,3],xmm7[4,5,6,7]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[0,1,0,1]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm8 = xmm7[0,1,2,3],xmm8[4,5],xmm7[6,7]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm4[4,5],xmm7[6,7]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5,6,7]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[0,0,1,1]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[0,1,0,1]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5,6,7]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm6[0,1,0,1]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm9[1,1,2,2]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,1,0,1]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4,5],xmm7[6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[2,3,2,3]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3],xmm0[4,5,6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[2,2,3,3]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm7 = xmm0[0,1,2,3],xmm7[4,5],xmm0[6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,2,2]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5,6,7]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5],xmm0[6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5,6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[0,1,0,1]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4,5],xmm4[6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm8[2,3,2,3]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm4[2,3],xmm6[4,5,6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,2,3,3]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5],xmm6[6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7]
-; SSE42-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,3],xmm5[3,3]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3,4,5],xmm5[6,7]
-; SSE42-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm9[3,3]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3,4,5],xmm3[6,7]
-; SSE42-NEXT:    movdqu %xmm3, 32(%rdi)
-; SSE42-NEXT:    movdqu %xmm5, 80(%rdi)
+; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm8[4,5],xmm2[6,7]
 ; SSE42-NEXT:    movdqu %xmm2, 16(%rdi)
-; SSE42-NEXT:    movdqu %xmm1, 48(%rdi)
-; SSE42-NEXT:    movdqu %xmm7, 64(%rdi)
-; SSE42-NEXT:    movdqu %xmm8, (%rdi)
+; SSE42-NEXT:    movdqu %xmm4, 32(%rdi)
+; SSE42-NEXT:    movdqu %xmm5, 48(%rdi)
+; SSE42-NEXT:    movdqu %xmm0, 64(%rdi)
+; SSE42-NEXT:    movdqu %xmm7, 80(%rdi)
+; SSE42-NEXT:    movdqu %xmm1, (%rdi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: interleave_24i32_in:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovupd (%rcx), %ymm0
-; AVX1-NEXT:    vmovups (%rdx), %xmm1
+; AVX1-NEXT:    vmovups (%rdx), %xmm0
+; AVX1-NEXT:    vmovups (%rsi), %xmm1
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastsd (%rcx), %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vmovups 16(%rcx), %xmm1
 ; AVX1-NEXT:    vmovups 16(%rdx), %xmm2
-; AVX1-NEXT:    vmovups (%rsi), %xmm3
-; AVX1-NEXT:    vmovups 16(%rsi), %xmm4
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm2[3,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm1[1,1],xmm4[0,2]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT:    vbroadcastsd (%rcx), %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,0],xmm1[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[2,1],xmm3[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[1,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT:    vbroadcastsd 24(%rsi), %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
 ; AVX1-NEXT:    vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
-; AVX1-NEXT:    vmovups %ymm0, 32(%rdi)
-; AVX1-NEXT:    vmovups %ymm1, (%rdi)
-; AVX1-NEXT:    vmovups %ymm2, 64(%rdi)
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX1-NEXT:    vmovups %ymm2, 32(%rdi)
+; AVX1-NEXT:    vmovups %ymm1, 64(%rdi)
+; AVX1-NEXT:    vmovups %ymm0, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -1742,26 +1749,26 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
 ; AVX2-SLOW-NEXT:    vmovups (%rsi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups (%rdx), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups (%rcx), %ymm2
-; AVX2-SLOW-NEXT:    vbroadcastsd 24(%rsi), %ymm3
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm4 = ymm1[1,2,3,3,5,6,7,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd (%rcx), %ymm4
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm4 = mem[1,0,2,2]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vbroadcastsd (%rcx), %ymm5
+; AVX2-SLOW-NEXT:    vbroadcastsd 24(%rsi), %ymm5
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, 32(%rdi)
-; AVX2-SLOW-NEXT:    vmovups %ymm4, (%rdi)
-; AVX2-SLOW-NEXT:    vmovups %ymm3, 64(%rdi)
+; AVX2-SLOW-NEXT:    vmovups %ymm4, 64(%rdi)
+; AVX2-SLOW-NEXT:    vmovups %ymm3, (%rdi)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
@@ -1770,27 +1777,27 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
 ; AVX2-FAST-ALL-NEXT:    vmovups (%rsi), %ymm0
 ; AVX2-FAST-ALL-NEXT:    vmovups (%rdx), %ymm1
 ; AVX2-FAST-ALL-NEXT:    vmovups (%rcx), %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm3 = <5,u,u,6,u,u,7,u>
+; AVX2-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = [1,0,2,2,1,0,2,2]
+; AVX2-FAST-ALL-NEXT:    # ymm3 = mem[0,1,0,1]
 ; AVX2-FAST-ALL-NEXT:    vpermps %ymm1, %ymm3, %ymm3
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 24(%rsi), %ymm4
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd (%rcx), %ymm4
 ; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = [1,0,2,2,1,0,2,2]
-; AVX2-FAST-ALL-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm4 = <5,u,u,6,u,u,7,u>
 ; AVX2-FAST-ALL-NEXT:    vpermps %ymm1, %ymm4, %ymm4
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd (%rcx), %ymm5
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm5 = ymm2[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 24(%rsi), %ymm5
 ; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
 ; AVX2-FAST-ALL-NEXT:    vmovups %ymm0, 32(%rdi)
-; AVX2-FAST-ALL-NEXT:    vmovups %ymm4, (%rdi)
-; AVX2-FAST-ALL-NEXT:    vmovups %ymm3, 64(%rdi)
+; AVX2-FAST-ALL-NEXT:    vmovups %ymm4, 64(%rdi)
+; AVX2-FAST-ALL-NEXT:    vmovups %ymm3, (%rdi)
 ; AVX2-FAST-ALL-NEXT:    vzeroupper
 ; AVX2-FAST-ALL-NEXT:    retq
 ;
@@ -1799,58 +1806,57 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
 ; AVX2-FAST-PERLANE-NEXT:    vmovups (%rsi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups (%rdx), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups (%rcx), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 24(%rsi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm4 = ymm1[1,2,3,3,5,6,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd (%rcx), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm4 = mem[1,0,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd (%rcx), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 24(%rsi), %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, 32(%rdi)
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm4, (%rdi)
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, 64(%rdi)
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm4, 64(%rdi)
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, (%rdi)
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
 ; XOP-LABEL: interleave_24i32_in:
 ; XOP:       # %bb.0:
 ; XOP-NEXT:    vmovups (%rsi), %ymm0
-; XOP-NEXT:    vmovups (%rdx), %ymm1
-; XOP-NEXT:    vpermil2ps {{.*#+}} ymm0 = ymm0[u,3],ymm1[3],ymm0[u,4],ymm1[4],ymm0[u,5]
 ; XOP-NEXT:    vmovups (%rcx), %ymm1
-; XOP-NEXT:    vmovups (%rdx), %xmm2
+; XOP-NEXT:    vpermil2ps {{.*#+}} ymm0 = ymm1[2],ymm0[3],ymm1[u,3],ymm0[4],ymm1[u,4],ymm0[5]
+; XOP-NEXT:    vmovups (%rdx), %xmm1
+; XOP-NEXT:    vmovups (%rsi), %xmm2
+; XOP-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm2[1],xmm1[1]
+; XOP-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[1,1],xmm3[0,2]
+; XOP-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; XOP-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,1]
+; XOP-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; XOP-NEXT:    vbroadcastsd (%rcx), %ymm2
+; XOP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; XOP-NEXT:    vmovups 16(%rcx), %xmm2
 ; XOP-NEXT:    vmovups 16(%rdx), %xmm3
-; XOP-NEXT:    vmovups (%rsi), %xmm4
-; XOP-NEXT:    vmovups 16(%rsi), %xmm5
-; XOP-NEXT:    vshufps {{.*#+}} xmm6 = xmm5[3,3],xmm3[3,3]
-; XOP-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm5[1],xmm3[1]
-; XOP-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm5[0,2]
-; XOP-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
-; XOP-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[2,3,2,3]
-; XOP-NEXT:    vpermilpd {{.*#+}} ymm5 = ymm5[0,0,3,3]
-; XOP-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6],ymm5[7]
-; XOP-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm4[1],xmm2[1]
-; XOP-NEXT:    vshufps {{.*#+}} xmm5 = xmm2[1,1],xmm5[0,2]
-; XOP-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; XOP-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,1]
-; XOP-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; XOP-NEXT:    vbroadcastsd (%rcx), %ymm4
-; XOP-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
-; XOP-NEXT:    vpermilpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
-; XOP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
+; XOP-NEXT:    vshufps {{.*#+}} xmm4 = xmm3[3,0],xmm2[3,0]
+; XOP-NEXT:    vshufps {{.*#+}} xmm4 = xmm2[2,1],xmm4[0,2]
+; XOP-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[1,0]
+; XOP-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[2,2]
+; XOP-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; XOP-NEXT:    vbroadcastsd 24(%rsi), %ymm3
+; XOP-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; XOP-NEXT:    vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
+; XOP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
 ; XOP-NEXT:    vmovups %ymm0, 32(%rdi)
-; XOP-NEXT:    vmovups %ymm2, (%rdi)
-; XOP-NEXT:    vmovups %ymm3, 64(%rdi)
+; XOP-NEXT:    vmovups %ymm2, 64(%rdi)
+; XOP-NEXT:    vmovups %ymm1, (%rdi)
 ; XOP-NEXT:    vzeroupper
 ; XOP-NEXT:    retq
   %s1 = load <8 x i32>, <8 x i32>* %q1, align 4

diff --git a/llvm/test/CodeGen/X86/pr34592.ll b/llvm/test/CodeGen/X86/pr34592.ll
index 9887dac76e20b..ecbdc24f5453f 100644
--- a/llvm/test/CodeGen/X86/pr34592.ll
+++ b/llvm/test/CodeGen/X86/pr34592.ll
@@ -14,34 +14,39 @@ define <16 x i64> @pluto(<16 x i64> %arg, <16 x i64> %arg1, <16 x i64> %arg2, <1
 ; CHECK-NEXT:    vmovaps %ymm4, %ymm10
 ; CHECK-NEXT:    vmovaps %ymm3, %ymm9
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm8
-; CHECK-NEXT:    vmovaps 240(%rbp), %ymm4
+; CHECK-NEXT:    vmovaps %ymm0, %ymm4
+; CHECK-NEXT:    vmovaps 240(%rbp), %ymm1
 ; CHECK-NEXT:    vmovaps 208(%rbp), %ymm3
-; CHECK-NEXT:    vmovaps 176(%rbp), %ymm1
-; CHECK-NEXT:    vmovaps 144(%rbp), %ymm1
+; CHECK-NEXT:    vmovaps 176(%rbp), %ymm0
+; CHECK-NEXT:    vmovaps 144(%rbp), %ymm0
 ; CHECK-NEXT:    vmovaps 112(%rbp), %ymm11
 ; CHECK-NEXT:    vmovaps 80(%rbp), %ymm11
 ; CHECK-NEXT:    vmovaps 48(%rbp), %ymm11
 ; CHECK-NEXT:    vmovaps 16(%rbp), %ymm11
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm2[6,7]
-; CHECK-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
-; CHECK-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
-; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,1,2,1]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
-; CHECK-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm7[2,3],ymm6[0,1]
-; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
-; CHECK-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm7[0],ymm5[0],ymm7[2],ymm5[2]
-; CHECK-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; CHECK-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[1,1,1,1]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3,4,5],ymm1[6,7]
-; CHECK-NEXT:    vmovaps %xmm3, %xmm4
-; CHECK-NEXT:    vmovaps %xmm7, %xmm3
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm4 = xmm3[0,1],xmm4[2,3]
-; CHECK-NEXT:    # implicit-def: $ymm3
-; CHECK-NEXT:    vmovaps %xmm4, %xmm3
-; CHECK-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[0,0,1,3]
-; CHECK-NEXT:    vpslldq {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,ymm5[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[16,17,18,19,20,21,22,23]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5],ymm3[6,7]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm2[6,7]
+; CHECK-NEXT:    vmovaps %xmm3, %xmm6
+; CHECK-NEXT:    # implicit-def: $ymm2
+; CHECK-NEXT:    vinserti128 $1, %xmm6, %ymm2, %ymm2
+; CHECK-NEXT:    vpalignr {{.*#+}} ymm0 = ymm4[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
+; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,0]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
+; CHECK-NEXT:    vextracti128 $1, %ymm7, %xmm2
+; CHECK-NEXT:    vmovq {{.*#+}} xmm6 = xmm2[0],zero
+; CHECK-NEXT:    # implicit-def: $ymm2
+; CHECK-NEXT:    vmovaps %xmm6, %xmm2
+; CHECK-NEXT:    # kill: def $xmm4 killed $xmm4 killed $ymm4
+; CHECK-NEXT:    vinserti128 $1, %xmm4, %ymm2, %ymm2
+; CHECK-NEXT:    vmovaps %xmm7, %xmm4
+; CHECK-NEXT:    vpslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
+; CHECK-NEXT:    # implicit-def: $ymm4
+; CHECK-NEXT:    vmovaps %xmm6, %xmm4
+; CHECK-NEXT:    vpalignr {{.*#+}} ymm3 = ymm3[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+; CHECK-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3]
+; CHECK-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm1[2,3],ymm7[4,5,6,7]
+; CHECK-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,1,3]
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm4 = ymm5[0,1,0,1,4,5,4,5]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
 ; CHECK-NEXT:    movq %rbp, %rsp
 ; CHECK-NEXT:    popq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa %rsp, 8

diff --git a/llvm/test/CodeGen/X86/pr44976.ll b/llvm/test/CodeGen/X86/pr44976.ll
index 7c8d5e099ca67..f16adfa6f0fc4 100644
--- a/llvm/test/CodeGen/X86/pr44976.ll
+++ b/llvm/test/CodeGen/X86/pr44976.ll
@@ -12,75 +12,57 @@ define <3 x i32> @f_29(<12 x i16> %a, <12 x i16> %b) {
 ; CHECK-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; CHECK-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; CHECK-NEXT:    movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
 ; CHECK-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; CHECK-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; CHECK-NEXT:    movd %r9d, %xmm1
+; CHECK-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; CHECK-NEXT:    movd %r9d, %xmm0
 ; CHECK-NEXT:    movd %r8d, %xmm3
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; CHECK-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; CHECK-NEXT:    movd %ecx, %xmm1
+; CHECK-NEXT:    movd %ecx, %xmm0
 ; CHECK-NEXT:    movd %edx, %xmm2
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; CHECK-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
 ; CHECK-NEXT:    movd %esi, %xmm4
-; CHECK-NEXT:    movd %edi, %xmm1
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; CHECK-NEXT:    movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
-; CHECK-NEXT:    pinsrw $1, {{[0-9]+}}(%rsp), %xmm4
-; CHECK-NEXT:    pinsrw $2, {{[0-9]+}}(%rsp), %xmm4
-; CHECK-NEXT:    pinsrw $3, {{[0-9]+}}(%rsp), %xmm4
+; CHECK-NEXT:    movd %edi, %xmm0
+; CHECK-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
 ; CHECK-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    pinsrw $1, {{[0-9]+}}(%rsp), %xmm2
 ; CHECK-NEXT:    pinsrw $2, {{[0-9]+}}(%rsp), %xmm2
 ; CHECK-NEXT:    pinsrw $3, {{[0-9]+}}(%rsp), %xmm2
-; CHECK-NEXT:    movdqa %xmm1, %xmm3
-; CHECK-NEXT:    pmulhuw %xmm0, %xmm3
-; CHECK-NEXT:    pmullw %xmm0, %xmm1
-; CHECK-NEXT:    movdqa %xmm1, %xmm0
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; CHECK-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[1,2,3,3]
-; CHECK-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; CHECK-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; CHECK-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
-; CHECK-NEXT:    movdqa %xmm2, %xmm7
-; CHECK-NEXT:    pmulhuw %xmm4, %xmm7
-; CHECK-NEXT:    pshuflw {{.*#+}} xmm3 = xmm7[0,0,2,1,4,5,6,7]
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm3[0]
-; CHECK-NEXT:    movdqa {{.*#+}} xmm3 = [65535,0,65535,0,65535,0,65535,0]
-; CHECK-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
-; CHECK-NEXT:    pmullw %xmm4, %xmm2
-; CHECK-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,3,4,5,6,7]
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
-; CHECK-NEXT:    pand %xmm3, %xmm5
-; CHECK-NEXT:    pandn %xmm6, %xmm3
-; CHECK-NEXT:    por %xmm5, %xmm3
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
-; CHECK-NEXT:    movdqa %xmm3, %xmm4
-; CHECK-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
-; CHECK-NEXT:    movdqa %xmm0, %xmm5
-; CHECK-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,3],xmm4[2,0]
-; CHECK-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[3,3,3,3]
-; CHECK-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[3,3,3,3]
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
-; CHECK-NEXT:    movdqa %xmm3, %xmm6
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm1[0]
-; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm6[2,0]
-; CHECK-NEXT:    paddd %xmm5, %xmm0
-; CHECK-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; CHECK-NEXT:    paddd %xmm4, %xmm3
+; CHECK-NEXT:    movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT:    pinsrw $1, {{[0-9]+}}(%rsp), %xmm3
+; CHECK-NEXT:    pinsrw $2, {{[0-9]+}}(%rsp), %xmm3
+; CHECK-NEXT:    pinsrw $3, {{[0-9]+}}(%rsp), %xmm3
+; CHECK-NEXT:    movdqa %xmm0, %xmm4
+; CHECK-NEXT:    pmulhuw %xmm1, %xmm4
+; CHECK-NEXT:    pmullw %xmm1, %xmm0
 ; CHECK-NEXT:    movdqa %xmm0, %xmm1
-; CHECK-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
-; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,3]
-; CHECK-NEXT:    paddd %xmm1, %xmm0
+; CHECK-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; CHECK-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; CHECK-NEXT:    movdqa %xmm3, %xmm4
+; CHECK-NEXT:    pmulhuw %xmm2, %xmm4
+; CHECK-NEXT:    pmullw %xmm2, %xmm3
+; CHECK-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; CHECK-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
+; CHECK-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[1,3,1,3]
+; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; CHECK-NEXT:    paddd %xmm2, %xmm0
+; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,2,1,3]
+; CHECK-NEXT:    paddd %xmm4, %xmm1
+; CHECK-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
+; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,3]
+; CHECK-NEXT:    paddd %xmm2, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %a32 = zext <12 x i16> %a to <12 x i32>

diff --git a/llvm/test/CodeGen/X86/splat-for-size.ll b/llvm/test/CodeGen/X86/splat-for-size.ll
index 4d986f67be21d..932b23bd37ba5 100644
--- a/llvm/test/CodeGen/X86/splat-for-size.ll
+++ b/llvm/test/CodeGen/X86/splat-for-size.ll
@@ -385,13 +385,27 @@ define <32 x i8> @splat_v32i8_pgso(<32 x i8> %x) !prof !14 {
 @A = common dso_local global <3 x i64> zeroinitializer, align 32
 
 define <8 x i64> @pr23259() #1 {
-; CHECK-LABEL: pr23259:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmovaps A+16(%rip), %xmm0
-; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
-; CHECK-NEXT:    retq
+; AVX-LABEL: pr23259:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq $1
+; AVX-NEXT:    popq %rax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: pr23259:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vmovdqa A(%rip), %ymm0
+; AVX2-NEXT:    pushq $1
+; AVX2-NEXT:    popq %rax
+; AVX2-NEXT:    vmovq %rax, %xmm1
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,1,1]
+; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
+; AVX2-NEXT:    retq
 entry:
   %0 = load <4 x i64>, <4 x i64>* bitcast (<3 x i64>* @A to <4 x i64>*), align 32
   %1 = shufflevector <4 x i64> %0, <4 x i64> undef, <3 x i32> <i32 undef, i32 undef, i32 2>

diff --git a/llvm/test/CodeGen/X86/split-extend-vector-inreg.ll b/llvm/test/CodeGen/X86/split-extend-vector-inreg.ll
index 08088496b7428..abd0e1beb635d 100644
--- a/llvm/test/CodeGen/X86/split-extend-vector-inreg.ll
+++ b/llvm/test/CodeGen/X86/split-extend-vector-inreg.ll
@@ -5,9 +5,10 @@
 define <4 x i64> @autogen_SD88863() {
 ; CHECK-LABEL: autogen_SD88863:
 ; CHECK:       # %bb.0: # %BB
-; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
 ; CHECK-NEXT:    movb $1, %al
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB0_1: # %CF

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
index 3432195dd86bb..4d7be08e28190 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
@@ -131,55 +131,58 @@ define void @vf2(<10 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1
 define void @vf4(<20 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1, <4 x i16>* %out.vec2, <4 x i16>* %out.vec3, <4 x i16>* %out.vec4) nounwind {
 ; SSE-LABEL: vf4:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm2
-; SSE-NEXT:    movdqa 16(%rdi), %xmm3
+; SSE-NEXT:    movdqa (%rdi), %xmm3
+; SSE-NEXT:    movdqa 16(%rdi), %xmm2
 ; SSE-NEXT:    movdqa 32(%rdi), %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[3,1,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm1[2,1,2,3,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[3,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,2,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm8 = xmm4[0,3,2,3,4,5,6,7]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
+; SSE-NEXT:    movdqa %xmm0, %xmm9
+; SSE-NEXT:    psllq $48, %xmm9
+; SSE-NEXT:    movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE-NEXT:    movdqa %xmm2, %xmm6
+; SSE-NEXT:    psrlq $48, %xmm6
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,3,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[1,2,2,3,4,5,6,7]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; SSE-NEXT:    pand %xmm10, %xmm5
+; SSE-NEXT:    movdqa %xmm10, %xmm11
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,1,1,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,7]
+; SSE-NEXT:    punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm2[2],xmm7[3],xmm2[3]
+; SSE-NEXT:    pand %xmm10, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[1,1,1,1]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE-NEXT:    movdqa %xmm3, %xmm4
-; SSE-NEXT:    psrlq $48, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,3,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[1,2,2,3,4,5,6,7]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[0,3,2,1]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,1,1,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7]
-; SSE-NEXT:    punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[3,1,2,0]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[1,2,0,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[1,1,1,1]
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm7[0,3,2,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,3,4,5,6,7]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT:    movdqa {{.*#+}} xmm7 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[3,0]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[0,2]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,6,6,7]
+; SSE-NEXT:    movdqa %xmm10, %xmm4
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[3,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0,2]
+; SSE-NEXT:    movdqa %xmm10, %xmm3
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,3,4,5,6,7]
+; SSE-NEXT:    pand %xmm10, %xmm1
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    pand %xmm7, %xmm2
-; SSE-NEXT:    pandn %xmm0, %xmm7
-; SSE-NEXT:    por %xmm2, %xmm7
-; SSE-NEXT:    movq %xmm1, (%rsi)
-; SSE-NEXT:    movq %xmm4, (%rdx)
-; SSE-NEXT:    movq %xmm5, (%rcx)
-; SSE-NEXT:    movq %xmm6, (%r8)
-; SSE-NEXT:    movq %xmm7, (%r9)
+; SSE-NEXT:    pand %xmm10, %xmm2
+; SSE-NEXT:    pandn %xmm9, %xmm10
+; SSE-NEXT:    por %xmm10, %xmm5
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,0,1,1]
+; SSE-NEXT:    pandn %xmm6, %xmm11
+; SSE-NEXT:    por %xmm11, %xmm7
+; SSE-NEXT:    pandn %xmm0, %xmm4
+; SSE-NEXT:    pslld $16, %xmm0
+; SSE-NEXT:    pandn %xmm0, %xmm3
+; SSE-NEXT:    por %xmm3, %xmm1
+; SSE-NEXT:    por %xmm4, %xmm2
+; SSE-NEXT:    movq %xmm8, (%rsi)
+; SSE-NEXT:    movq %xmm5, (%rdx)
+; SSE-NEXT:    movq %xmm7, (%rcx)
+; SSE-NEXT:    movq %xmm1, (%r8)
+; SSE-NEXT:    movq %xmm2, (%r9)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf4:

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
index cc32b5450bff9..52303572d11cb 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
@@ -167,24 +167,29 @@ define void @vf4(<24 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa (%rdi), %xmm0
 ; SSE-NEXT:    movdqa 16(%rdi), %xmm1
-; SSE-NEXT:    movdqa 32(%rdi), %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,1,0,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; SSE-NEXT:    movdqa {{.*#+}} xmm6 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT:    movdqa %xmm6, %xmm2
-; SSE-NEXT:    pandn %xmm5, %xmm2
-; SSE-NEXT:    movdqa %xmm1, %xmm7
-; SSE-NEXT:    psrld $16, %xmm7
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
-; SSE-NEXT:    punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; SSE-NEXT:    pand %xmm6, %xmm4
-; SSE-NEXT:    por %xmm2, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm5[2,2,3,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm5[0,3,2,3]
+; SSE-NEXT:    movdqa 32(%rdi), %xmm3
+; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE-NEXT:    movdqa %xmm4, %xmm9
+; SSE-NEXT:    pandn %xmm3, %xmm9
+; SSE-NEXT:    movdqa %xmm3, %xmm5
+; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[2,2,3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm10 = xmm3[0,3,2,3]
+; SSE-NEXT:    pslld $16, %xmm3
+; SSE-NEXT:    movdqa %xmm4, %xmm2
+; SSE-NEXT:    pandn %xmm3, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,1,0,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,4,6,6,7]
+; SSE-NEXT:    punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
+; SSE-NEXT:    pand %xmm4, %xmm6
+; SSE-NEXT:    por %xmm2, %xmm6
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    psrld $16, %xmm2
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7]
+; SSE-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE-NEXT:    pand %xmm4, %xmm3
+; SSE-NEXT:    por %xmm9, %xmm3
 ; SSE-NEXT:    psrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT:    movdqa %xmm6, %xmm2
+; SSE-NEXT:    movdqa %xmm4, %xmm2
 ; SSE-NEXT:    pandn %xmm5, %xmm2
 ; SSE-NEXT:    movdqa %xmm0, %xmm5
 ; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,0],xmm1[0,0]
@@ -192,31 +197,29 @@ define void @vf4(<24 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm5[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[0,3,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm7[1,0,2,3,4,5,6,7]
-; SSE-NEXT:    pand %xmm6, %xmm7
+; SSE-NEXT:    pand %xmm4, %xmm7
 ; SSE-NEXT:    por %xmm2, %xmm7
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm3[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm5[3,1,2,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
-; SSE-NEXT:    pand %xmm6, %xmm3
-; SSE-NEXT:    pandn %xmm8, %xmm6
-; SSE-NEXT:    por %xmm3, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm5[3,1,2,3,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
+; SSE-NEXT:    pand %xmm4, %xmm2
+; SSE-NEXT:    pandn %xmm8, %xmm4
+; SSE-NEXT:    por %xmm2, %xmm4
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm9[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm10[0,2,2,3,4,5,6,7]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
 ; SSE-NEXT:    psrlq $48, %xmm1
 ; SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm9[1,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm10[1,3,2,3,4,5,6,7]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT:    movq %xmm2, (%rsi)
-; SSE-NEXT:    movq %xmm4, (%rdx)
+; SSE-NEXT:    movq %xmm6, (%rsi)
+; SSE-NEXT:    movq %xmm3, (%rdx)
 ; SSE-NEXT:    movq %xmm7, (%rcx)
-; SSE-NEXT:    movq %xmm6, (%r8)
+; SSE-NEXT:    movq %xmm4, (%r8)
 ; SSE-NEXT:    movq %xmm5, (%r9)
 ; SSE-NEXT:    movq %xmm0, (%rax)
 ; SSE-NEXT:    retq

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
index a95f2550c31e0..89ec6447f9567 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
@@ -85,14 +85,14 @@ define void @load_i32_stride3_vf4(<12 x i32>* %in.vec, <4 x i32>* %out.vec0, <4
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    movaps 32(%rdi), %xmm2
 ; SSE-NEXT:    movdqa %xmm0, %xmm3
-; SSE-NEXT:    movaps %xmm1, %xmm4
+; SSE-NEXT:    movaps %xmm2, %xmm4
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,1],xmm1[3,3]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[1,1,1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,3],xmm2[1,1]
 ; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,1],xmm2[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[2,0]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
 ; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[0,3]
 ; SSE-NEXT:    movaps %xmm3, (%rsi)
@@ -173,36 +173,36 @@ define void @load_i32_stride3_vf8(<24 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
 ; SSE-NEXT:    movaps 80(%rdi), %xmm8
 ; SSE-NEXT:    movaps 64(%rdi), %xmm3
 ; SSE-NEXT:    movdqa (%rdi), %xmm1
-; SSE-NEXT:    movaps 16(%rdi), %xmm6
+; SSE-NEXT:    movaps 16(%rdi), %xmm5
 ; SSE-NEXT:    movaps 32(%rdi), %xmm10
 ; SSE-NEXT:    movdqa 48(%rdi), %xmm2
 ; SSE-NEXT:    movdqa %xmm1, %xmm11
-; SSE-NEXT:    movaps %xmm6, %xmm7
+; SSE-NEXT:    movaps %xmm10, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,1],xmm5[3,3]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,0],xmm6[0,0]
-; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm6[1,1,1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0],xmm10[1,0]
-; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,3],xmm6[0,2]
-; SSE-NEXT:    movdqa %xmm2, %xmm6
-; SSE-NEXT:    movaps %xmm3, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,0],xmm5[0,0]
+; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm5[1,1,1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,3],xmm10[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,3],xmm5[0,2]
+; SSE-NEXT:    movdqa %xmm2, %xmm5
+; SSE-NEXT:    movaps %xmm8, %xmm4
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,1],xmm3[3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[2,3,2,3]
 ; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[0,0]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm12 = xmm3[1,1,1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0],xmm8[1,0]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,3],xmm3[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,1],xmm8[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,1],xmm10[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,3],xmm8[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[2,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[2,0]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm10[0,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,1],xmm8[0,3]
-; SSE-NEXT:    movaps %xmm6, 16(%rsi)
+; SSE-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,1],xmm8[0,3]
+; SSE-NEXT:    movaps %xmm5, 16(%rsi)
 ; SSE-NEXT:    movaps %xmm11, (%rsi)
 ; SSE-NEXT:    movaps %xmm2, 16(%rdx)
 ; SSE-NEXT:    movaps %xmm1, (%rdx)
-; SSE-NEXT:    movaps %xmm5, 16(%rcx)
+; SSE-NEXT:    movaps %xmm6, 16(%rcx)
 ; SSE-NEXT:    movaps %xmm0, (%rcx)
 ; SSE-NEXT:    retq
 ;
@@ -358,95 +358,95 @@ define void @load_i32_stride3_vf8(<24 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
 define void @load_i32_stride3_vf16(<48 x i32>* %in.vec, <16 x i32>* %out.vec0, <16 x i32>* %out.vec1, <16 x i32>* %out.vec2) nounwind {
 ; SSE-LABEL: load_i32_stride3_vf16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movaps 96(%rdi), %xmm14
-; SSE-NEXT:    movaps 128(%rdi), %xmm11
-; SSE-NEXT:    movaps 112(%rdi), %xmm12
-; SSE-NEXT:    movaps 144(%rdi), %xmm3
-; SSE-NEXT:    movaps 176(%rdi), %xmm13
+; SSE-NEXT:    movaps 96(%rdi), %xmm10
+; SSE-NEXT:    movaps 128(%rdi), %xmm6
+; SSE-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 112(%rdi), %xmm13
+; SSE-NEXT:    movaps 144(%rdi), %xmm1
+; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 176(%rdi), %xmm7
 ; SSE-NEXT:    movaps 160(%rdi), %xmm5
 ; SSE-NEXT:    movaps (%rdi), %xmm15
-; SSE-NEXT:    movaps 16(%rdi), %xmm8
-; SSE-NEXT:    movaps 32(%rdi), %xmm6
-; SSE-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 48(%rdi), %xmm10
-; SSE-NEXT:    movaps 80(%rdi), %xmm9
-; SSE-NEXT:    movaps 64(%rdi), %xmm2
-; SSE-NEXT:    movaps %xmm2, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm9[1,0]
-; SSE-NEXT:    movaps %xmm10, %xmm4
+; SSE-NEXT:    movaps 16(%rdi), %xmm9
+; SSE-NEXT:    movaps 32(%rdi), %xmm8
+; SSE-NEXT:    movaps 48(%rdi), %xmm11
+; SSE-NEXT:    movaps 80(%rdi), %xmm12
+; SSE-NEXT:    movaps 64(%rdi), %xmm3
+; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm12[1,1]
+; SSE-NEXT:    movaps %xmm11, %xmm4
 ; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,2]
 ; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    movaps %xmm5, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm13[1,0]
-; SSE-NEXT:    movaps %xmm3, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm8, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm7[1,1]
+; SSE-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm1, %xmm4
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm9, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm8[1,1]
 ; SSE-NEXT:    movaps %xmm15, %xmm1
 ; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
 ; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm13, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm6[1,1]
+; SSE-NEXT:    movaps %xmm10, %xmm14
+; SSE-NEXT:    movaps %xmm10, %xmm1
+; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,3],xmm0[0,2]
 ; SSE-NEXT:    movaps %xmm12, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm11[1,0]
-; SSE-NEXT:    movaps %xmm14, %xmm1
-; SSE-NEXT:    movaps %xmm14, %xmm7
-; SSE-NEXT:    movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm10, %xmm6
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,0],xmm2[0,0]
-; SSE-NEXT:    movaps %xmm2, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm9[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm3, %xmm4
-; SSE-NEXT:    movaps %xmm3, %xmm14
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,0],xmm5[0,0]
-; SSE-NEXT:    movaps %xmm5, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm13[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm7, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,0],xmm12[0,0]
-; SSE-NEXT:    movaps %xmm12, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm11[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm15[2,3,2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[1,0],xmm8[0,0]
-; SSE-NEXT:    movaps %xmm8, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm3[3,3]
+; SSE-NEXT:    movaps %xmm11, %xmm10
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[1,0],xmm3[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[2,0]
+; SSE-NEXT:    movaps %xmm7, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm5[3,3]
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,1],xmm7[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[0,2],xmm3[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm10[2,3,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,1,1]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm14[2,3,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,1],xmm13[0,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm8[1,1,1,1]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm7[0,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm12[1,1,1,1]
-; SSE-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm7 = mem[2,3,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[0,1],xmm11[0,3]
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm3, 32(%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm3, (%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm3, 48(%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm3, 16(%rsi)
-; SSE-NEXT:    movaps %xmm1, 32(%rdx)
+; SSE-NEXT:    movaps %xmm7, %xmm4
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,0],xmm5[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[2,0]
+; SSE-NEXT:    movaps %xmm6, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm13[3,3]
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm1, %xmm6
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,0],xmm13[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[2,0]
+; SSE-NEXT:    movaps %xmm8, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm9[3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm15[2,3,2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[1,0],xmm9[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[0,2],xmm0[2,0]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm11[2,3,2,3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm12[0,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm7[2,3,2,3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
+; SSE-NEXT:    shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm5 = xmm5[0,1],mem[0,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm8[0,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
+; SSE-NEXT:    shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm6 = xmm6[0,1],mem[0,3]
+; SSE-NEXT:    movaps %xmm14, 32(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, (%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 48(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 16(%rsi)
+; SSE-NEXT:    movaps %xmm2, 32(%rdx)
 ; SSE-NEXT:    movaps %xmm15, (%rdx)
 ; SSE-NEXT:    movaps %xmm4, 48(%rdx)
-; SSE-NEXT:    movaps %xmm6, 16(%rdx)
-; SSE-NEXT:    movaps %xmm7, 32(%rcx)
-; SSE-NEXT:    movaps %xmm0, (%rcx)
+; SSE-NEXT:    movaps %xmm10, 16(%rdx)
+; SSE-NEXT:    movaps %xmm6, 32(%rcx)
+; SSE-NEXT:    movaps %xmm1, (%rcx)
 ; SSE-NEXT:    movaps %xmm5, 48(%rcx)
-; SSE-NEXT:    movaps %xmm2, 16(%rcx)
+; SSE-NEXT:    movaps %xmm3, 16(%rcx)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_i32_stride3_vf16:
@@ -690,144 +690,150 @@ define void @load_i32_stride3_vf16(<48 x i32>* %in.vec, <16 x i32>* %out.vec0, <
 define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <32 x i32>* %out.vec1, <32 x i32>* %out.vec2) nounwind {
 ; SSE-LABEL: load_i32_stride3_vf32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    subq $344, %rsp # imm = 0x158
-; SSE-NEXT:    movaps 336(%rdi), %xmm1
-; SSE-NEXT:    movaps 368(%rdi), %xmm9
-; SSE-NEXT:    movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 352(%rdi), %xmm14
+; SSE-NEXT:    subq $360, %rsp # imm = 0x168
+; SSE-NEXT:    movaps 336(%rdi), %xmm11
+; SSE-NEXT:    movaps 368(%rdi), %xmm14
 ; SSE-NEXT:    movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 240(%rdi), %xmm15
-; SSE-NEXT:    movaps 272(%rdi), %xmm13
-; SSE-NEXT:    movaps 256(%rdi), %xmm7
+; SSE-NEXT:    movaps 352(%rdi), %xmm13
+; SSE-NEXT:    movaps 240(%rdi), %xmm12
+; SSE-NEXT:    movaps 272(%rdi), %xmm6
+; SSE-NEXT:    movaps 256(%rdi), %xmm1
+; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 144(%rdi), %xmm10
+; SSE-NEXT:    movaps 176(%rdi), %xmm3
+; SSE-NEXT:    movaps 160(%rdi), %xmm7
 ; SSE-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 144(%rdi), %xmm3
-; SSE-NEXT:    movaps 176(%rdi), %xmm12
-; SSE-NEXT:    movaps 160(%rdi), %xmm10
 ; SSE-NEXT:    movaps 48(%rdi), %xmm5
-; SSE-NEXT:    movaps 80(%rdi), %xmm6
-; SSE-NEXT:    movaps 64(%rdi), %xmm8
-; SSE-NEXT:    movaps %xmm8, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[1,0]
-; SSE-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm5, %xmm2
 ; SSE-NEXT:    movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm10, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm12[1,0]
-; SSE-NEXT:    movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm3, %xmm2
-; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 80(%rdi), %xmm8
+; SSE-NEXT:    movaps 64(%rdi), %xmm9
+; SSE-NEXT:    movaps %xmm9, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm8[1,1]
+; SSE-NEXT:    movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm5, %xmm2
 ; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
 ; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    movaps %xmm7, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm13[1,0]
-; SSE-NEXT:    movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm15, %xmm2
-; SSE-NEXT:    movaps %xmm15, %xmm4
-; SSE-NEXT:    movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
+; SSE-NEXT:    movaps %xmm3, %xmm5
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm10, %xmm2
+; SSE-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
 ; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm14, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm9[1,0]
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm6[1,1]
+; SSE-NEXT:    movaps %xmm6, %xmm15
+; SSE-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm12, %xmm2
+; SSE-NEXT:    movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
 ; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 16(%rdi), %xmm7
-; SSE-NEXT:    movaps 32(%rdi), %xmm2
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm7, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[1,0]
+; SSE-NEXT:    movaps %xmm13, %xmm2
+; SSE-NEXT:    movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm13, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm14[1,1]
+; SSE-NEXT:    movaps %xmm11, %xmm1
+; SSE-NEXT:    movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm11, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 16(%rdi), %xmm4
+; SSE-NEXT:    movaps 32(%rdi), %xmm3
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm4, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
 ; SSE-NEXT:    movaps (%rdi), %xmm11
-; SSE-NEXT:    movaps %xmm11, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 128(%rdi), %xmm2
-; SSE-NEXT:    movaps %xmm2, (%rsp) # 16-byte Spill
-; SSE-NEXT:    movaps 112(%rdi), %xmm14
-; SSE-NEXT:    movaps %xmm14, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[1,0]
-; SSE-NEXT:    movaps 96(%rdi), %xmm2
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 224(%rdi), %xmm2
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 208(%rdi), %xmm9
-; SSE-NEXT:    movaps %xmm9, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[1,0]
-; SSE-NEXT:    movaps 192(%rdi), %xmm2
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 320(%rdi), %xmm15
-; SSE-NEXT:    movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm11, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 128(%rdi), %xmm3
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 112(%rdi), %xmm7
+; SSE-NEXT:    movaps %xmm7, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
+; SSE-NEXT:    movaps 96(%rdi), %xmm6
+; SSE-NEXT:    movaps %xmm6, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 224(%rdi), %xmm3
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 208(%rdi), %xmm13
+; SSE-NEXT:    movaps %xmm13, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
+; SSE-NEXT:    movaps 192(%rdi), %xmm3
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 320(%rdi), %xmm3
 ; SSE-NEXT:    movaps 304(%rdi), %xmm0
 ; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[1,0]
-; SSE-NEXT:    movaps 288(%rdi), %xmm2
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm5, %xmm2
-; SSE-NEXT:    movaps %xmm8, %xmm5
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,0],xmm8[0,0]
-; SSE-NEXT:    movaps %xmm8, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm6[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0],xmm10[0,0]
-; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[3,1],xmm12[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm10[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
+; SSE-NEXT:    movaps %xmm3, %xmm14
 ; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm4, %xmm15
+; SSE-NEXT:    movaps 288(%rdi), %xmm3
+; SSE-NEXT:    movaps %xmm3, (%rsp) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[2,1],xmm9[3,3]
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0],xmm9[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm8[2,0]
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm5, %xmm0
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm5[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[1,0],xmm5[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[2,0]
+; SSE-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm15, %xmm0
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm3[3,3]
+; SSE-NEXT:    movaps %xmm12, %xmm15
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[1,0],xmm3[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[0,2],xmm0[2,0]
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[1,0],xmm0[0,0]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm13[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[0,2],xmm0[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm2[3,3]
 ; SSE-NEXT:    movaps %xmm1, %xmm12
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[1,0],xmm0[0,0]
-; SSE-NEXT:    shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm0 = xmm0[3,1],mem[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[0,2],xmm0[0,2]
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[1,0],xmm2[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[0,2],xmm0[2,0]
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm8, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm7[3,3]
+; SSE-NEXT:    movaps %xmm6, %xmm1
 ; SSE-NEXT:    movaps %xmm6, %xmm10
-; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[1,0],xmm14[0,0]
-; SSE-NEXT:    movaps %xmm14, %xmm0
-; SSE-NEXT:    movaps (%rsp), %xmm8 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm8[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[0,2]
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0],xmm9[0,0]
-; SSE-NEXT:    movaps %xmm9, %xmm0
-; SSE-NEXT:    shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm0 = xmm0[3,1],mem[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm0[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[1,0],xmm7[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[2,0]
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, %xmm1
+; SSE-NEXT:    movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm13[3,3]
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,0],xmm13[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[2,0]
+; SSE-NEXT:    movaps %xmm14, %xmm2
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,1],xmm5[3,3]
+; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0],xmm5[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm2[2,0]
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[0,0]
-; SSE-NEXT:    shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm2 = xmm2[3,1],mem[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm11[2,3,2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[1,0],xmm7[0,0]
-; SSE-NEXT:    movaps %xmm7, %xmm4
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,1],xmm13[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,2],xmm4[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,1,1]
+; SSE-NEXT:    movaps %xmm2, %xmm14
+; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[2,1],xmm4[3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm11[2,3,2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[1,0],xmm4[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,2],xmm14[2,0]
+; SSE-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm14 = mem[1,1,1,1]
 ; SSE-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
 ; SSE-NEXT:    # xmm5 = mem[2,3,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1]
 ; SSE-NEXT:    shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
 ; SSE-NEXT:    # xmm5 = xmm5[0,1],mem[0,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[1,1,1,1]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm13[0,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,1],xmm2[0,3]
 ; SSE-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; SSE-NEXT:    # xmm4 = mem[1,1,1,1]
 ; SSE-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
@@ -835,8 +841,8 @@ define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1]
 ; SSE-NEXT:    shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
 ; SSE-NEXT:    # xmm13 = xmm13[0,1],mem[0,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm14[1,1,1,1]
-; SSE-NEXT:    pshufd {{.*#+}} xmm14 = xmm6[2,3,2,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[1,1,1,1]
+; SSE-NEXT:    pshufd {{.*#+}} xmm14 = xmm1[2,3,2,3]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm14 = xmm14[0],xmm4[0],xmm14[1],xmm4[1]
 ; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,1],xmm8[0,3]
 ; SSE-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
@@ -846,12 +852,6 @@ define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
 ; SSE-NEXT:    shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; SSE-NEXT:    # xmm4 = xmm4[0,1],mem[0,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm9[1,1,1,1]
-; SSE-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm9 = mem[2,3,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
-; SSE-NEXT:    shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm9 = xmm9[0,1],mem[0,3]
 ; SSE-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
 ; SSE-NEXT:    # xmm8 = mem[1,1,1,1]
 ; SSE-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
@@ -861,45 +861,52 @@ define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <
 ; SSE-NEXT:    # xmm7 = xmm7[0,1],mem[0,3]
 ; SSE-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
 ; SSE-NEXT:    # xmm8 = mem[1,1,1,1]
+; SSE-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm1 = mem[2,3,2,3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
+; SSE-NEXT:    shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm1 = xmm1[0,1],mem[0,3]
+; SSE-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm8 = mem[1,1,1,1]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
 ; SSE-NEXT:    shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
 ; SSE-NEXT:    # xmm0 = xmm0[0,1],mem[0,3]
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm6, 96(%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm6, 64(%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm6, 32(%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm6, (%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm6, 112(%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm6, 80(%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm6, 48(%rsi)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm6, 16(%rsi)
-; SSE-NEXT:    movaps %xmm1, 96(%rdx)
-; SSE-NEXT:    movaps %xmm3, 64(%rdx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, 96(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, 64(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, 32(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, (%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, 112(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, 80(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, 48(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, 16(%rsi)
+; SSE-NEXT:    movaps %xmm3, 96(%rdx)
+; SSE-NEXT:    movaps %xmm6, 64(%rdx)
 ; SSE-NEXT:    movaps %xmm10, 32(%rdx)
 ; SSE-NEXT:    movaps %xmm11, (%rdx)
 ; SSE-NEXT:    movaps %xmm12, 112(%rdx)
 ; SSE-NEXT:    movaps %xmm15, 80(%rdx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, 48(%rdx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, 16(%rdx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, 48(%rdx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm2, 16(%rdx)
 ; SSE-NEXT:    movaps %xmm0, 96(%rcx)
-; SSE-NEXT:    movaps %xmm7, 112(%rcx)
-; SSE-NEXT:    movaps %xmm9, 64(%rcx)
+; SSE-NEXT:    movaps %xmm1, 112(%rcx)
+; SSE-NEXT:    movaps %xmm7, 64(%rcx)
 ; SSE-NEXT:    movaps %xmm4, 80(%rcx)
 ; SSE-NEXT:    movaps %xmm14, 32(%rcx)
 ; SSE-NEXT:    movaps %xmm13, 48(%rcx)
-; SSE-NEXT:    movaps %xmm2, (%rcx)
+; SSE-NEXT:    movaps %xmm9, (%rcx)
 ; SSE-NEXT:    movaps %xmm5, 16(%rcx)
-; SSE-NEXT:    addq $344, %rsp # imm = 0x158
+; SSE-NEXT:    addq $360, %rsp # imm = 0x168
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_i32_stride3_vf32:

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll
index f9452dbeb1d4b..b2fee112d4e3d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll
@@ -103,17 +103,20 @@ define void @load_i64_stride3_vf4(<12 x i64>* %in.vec, <4 x i64>* %out.vec0, <4
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovapd 32(%rdi), %ymm0
 ; AVX1-NEXT:    vmovapd (%rdi), %ymm1
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3]
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm3
-; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm3, %ymm3
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3]
+; AVX1-NEXT:    vmovapd 16(%rdi), %xmm2
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm1[0,1],ymm0[2],ymm1[3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0],ymm2[1],ymm3[2,3]
+; AVX1-NEXT:    vmovaps 64(%rdi), %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm5
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3]
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3]
 ; AVX1-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-NEXT:    vbroadcastsd 80(%rdi), %ymm4
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3]
+; AVX1-NEXT:    vbroadcastsd 80(%rdi), %ymm5
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3]
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3]
-; AVX1-NEXT:    vmovapd %ymm2, (%rsi)
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3]
+; AVX1-NEXT:    vmovapd %ymm3, (%rsi)
 ; AVX1-NEXT:    vmovapd %ymm1, (%rdx)
 ; AVX1-NEXT:    vmovapd %ymm0, (%rcx)
 ; AVX1-NEXT:    vzeroupper
@@ -220,28 +223,34 @@ define void @load_i64_stride3_vf8(<24 x i64>* %in.vec, <8 x i64>* %out.vec0, <8
 ; AVX1-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX1-NEXT:    vmovapd 128(%rdi), %ymm2
 ; AVX1-NEXT:    vmovapd 96(%rdi), %ymm3
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm3[0,1],ymm2[2],ymm3[3]
-; AVX1-NEXT:    vmovaps 112(%rdi), %xmm5
-; AVX1-NEXT:    vinsertf128 $1, 160(%rdi), %ymm5, %ymm5
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm1[0,1],ymm0[2],ymm1[3]
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm7
-; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm7, %ymm7
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2],ymm7[3]
+; AVX1-NEXT:    vmovapd 112(%rdi), %xmm4
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0],ymm4[1],ymm5[2,3]
+; AVX1-NEXT:    vmovaps 160(%rdi), %xmm6
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm7
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm9 = ymm5[0,1,2],ymm7[3]
+; AVX1-NEXT:    vmovapd 16(%rdi), %xmm7
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0],ymm7[1],ymm8[2,3]
+; AVX1-NEXT:    vmovaps 64(%rdi), %xmm5
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm10
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3]
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3]
 ; AVX1-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-NEXT:    vbroadcastsd 80(%rdi), %ymm8
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm8[3]
+; AVX1-NEXT:    vbroadcastsd 80(%rdi), %ymm10
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm10[3]
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2,3]
 ; AVX1-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[1],ymm2[0],ymm3[3],ymm2[2]
-; AVX1-NEXT:    vbroadcastsd 176(%rdi), %ymm8
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3]
+; AVX1-NEXT:    vbroadcastsd 176(%rdi), %ymm10
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm10[3]
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm7[0],ymm0[1],ymm7[2],ymm0[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2],ymm0[3]
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2],ymm2[3]
-; AVX1-NEXT:    vmovapd %ymm6, (%rsi)
-; AVX1-NEXT:    vmovapd %ymm4, 32(%rsi)
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3]
+; AVX1-NEXT:    vmovapd %ymm8, (%rsi)
+; AVX1-NEXT:    vmovapd %ymm9, 32(%rsi)
 ; AVX1-NEXT:    vmovapd %ymm3, 32(%rdx)
 ; AVX1-NEXT:    vmovapd %ymm1, (%rdx)
 ; AVX1-NEXT:    vmovapd %ymm2, 32(%rcx)
@@ -432,66 +441,92 @@ define void @load_i64_stride3_vf16(<48 x i64>* %in.vec, <16 x i64>* %out.vec0, <
 ;
 ; AVX1-LABEL: load_i64_stride3_vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd 224(%rdi), %ymm0
-; AVX1-NEXT:    vmovapd 192(%rdi), %ymm1
-; AVX1-NEXT:    vmovapd 320(%rdi), %ymm2
-; AVX1-NEXT:    vmovapd 288(%rdi), %ymm3
-; AVX1-NEXT:    vmovapd 32(%rdi), %ymm4
-; AVX1-NEXT:    vmovapd (%rdi), %ymm5
-; AVX1-NEXT:    vmovapd 128(%rdi), %ymm6
-; AVX1-NEXT:    vmovapd 96(%rdi), %ymm8
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm7 = ymm8[0,1],ymm6[2],ymm8[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm9 = ymm1[0,1],ymm0[2,3]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[1],ymm0[0],ymm9[3],ymm0[2]
-; AVX1-NEXT:    vbroadcastsd 272(%rdi), %ymm10
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm6[2,3]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[1],ymm6[0],ymm8[3],ymm6[2]
-; AVX1-NEXT:    vbroadcastsd 176(%rdi), %ymm10
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm10 = ymm5[0,1],ymm4[2,3]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm10 = ymm10[1],ymm4[0],ymm10[3],ymm4[2]
-; AVX1-NEXT:    vbroadcastsd 80(%rdi), %ymm11
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm11 = ymm3[0,1],ymm2[2,3]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm11 = ymm11[1],ymm2[0],ymm11[3],ymm2[2]
-; AVX1-NEXT:    vbroadcastsd 368(%rdi), %ymm12
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3]
-; AVX1-NEXT:    vmovaps 112(%rdi), %xmm12
-; AVX1-NEXT:    vinsertf128 $1, 160(%rdi), %ymm12, %ymm12
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0],ymm12[1],ymm7[2],ymm12[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm4[2],ymm5[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],mem[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm12[0],ymm6[1],ymm12[2],ymm6[3]
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm12
-; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm12, %ymm12
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2],ymm12[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm12[0],ymm4[1],ymm12[2],ymm4[3]
-; AVX1-NEXT:    vmovaps 304(%rdi), %xmm12
-; AVX1-NEXT:    vinsertf128 $1, 352(%rdi), %ymm12, %ymm12
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0],ymm12[1],ymm3[2],ymm12[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3]
+; AVX1-NEXT:    subq $72, %rsp
+; AVX1-NEXT:    vmovapd 320(%rdi), %ymm5
+; AVX1-NEXT:    vmovapd 288(%rdi), %ymm12
+; AVX1-NEXT:    vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovapd 32(%rdi), %ymm2
+; AVX1-NEXT:    vmovapd (%rdi), %ymm9
+; AVX1-NEXT:    vmovapd 128(%rdi), %ymm3
+; AVX1-NEXT:    vmovapd 96(%rdi), %ymm14
+; AVX1-NEXT:    vmovapd 112(%rdi), %xmm0
+; AVX1-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm14[0,1],ymm3[2],ymm14[3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3]
+; AVX1-NEXT:    vmovaps 160(%rdi), %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm4
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm4[3]
+; AVX1-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovapd 16(%rdi), %xmm8
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm9[0,1],ymm2[2],ymm9[3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3]
+; AVX1-NEXT:    vmovaps 64(%rdi), %xmm7
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm6
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm6[3]
+; AVX1-NEXT:    vmovupd %ymm0, (%rsp) # 32-byte Spill
+; AVX1-NEXT:    vmovapd 304(%rdi), %xmm11
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm12[0,1],ymm5[2],ymm12[3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3]
+; AVX1-NEXT:    vmovaps 352(%rdi), %xmm6
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm12
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm12[3]
+; AVX1-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovapd 224(%rdi), %ymm13
+; AVX1-NEXT:    vmovapd 192(%rdi), %ymm0
+; AVX1-NEXT:    vmovapd 208(%rdi), %xmm4
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm12 = ymm0[0,1],ymm13[2],ymm0[3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm12 = ymm12[0],ymm4[1],ymm12[2,3]
+; AVX1-NEXT:    vmovaps 256(%rdi), %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm15
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3]
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm13[0],ymm0[3],ymm13[2]
+; AVX1-NEXT:    vbroadcastsd 272(%rdi), %ymm15
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3]
+; AVX1-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm14 = ymm14[0,1],ymm3[2,3]
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm14 = ymm14[1],ymm3[0],ymm14[3],ymm3[2]
+; AVX1-NEXT:    vbroadcastsd 176(%rdi), %ymm15
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm2[2,3]
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[1],ymm2[0],ymm9[3],ymm2[2]
+; AVX1-NEXT:    vbroadcastsd 80(%rdi), %ymm15
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm9 = ymm9[0,1,2],ymm15[3]
+; AVX1-NEXT:    vblendpd $3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm15 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm15 = mem[0,1],ymm5[2,3]
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm15 = ymm15[1],ymm5[0],ymm15[3],ymm5[2]
+; AVX1-NEXT:    vbroadcastsd 368(%rdi), %ymm0
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm15[0,1,2],ymm0[3]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm15, %ymm10
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm10[0],ymm3[1],ymm10[2],ymm3[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm8, %ymm7
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2],ymm2[3]
-; AVX1-NEXT:    vmovaps 208(%rdi), %xmm12
-; AVX1-NEXT:    vinsertf128 $1, 256(%rdi), %ymm12, %ymm12
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0],ymm12[1],ymm1[2],ymm12[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm12[0],ymm0[1],ymm12[2],ymm0[3]
-; AVX1-NEXT:    vmovapd %ymm1, 64(%rsi)
-; AVX1-NEXT:    vmovapd %ymm3, 96(%rsi)
-; AVX1-NEXT:    vmovapd %ymm5, (%rsi)
-; AVX1-NEXT:    vmovapd %ymm7, 32(%rsi)
-; AVX1-NEXT:    vmovapd %ymm11, 96(%rdx)
-; AVX1-NEXT:    vmovapd %ymm10, (%rdx)
-; AVX1-NEXT:    vmovapd %ymm8, 32(%rdx)
-; AVX1-NEXT:    vmovapd %ymm9, 64(%rdx)
-; AVX1-NEXT:    vmovapd %ymm0, 64(%rcx)
-; AVX1-NEXT:    vmovapd %ymm2, 96(%rcx)
-; AVX1-NEXT:    vmovapd %ymm4, (%rcx)
-; AVX1-NEXT:    vmovapd %ymm6, 32(%rcx)
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2],ymm2[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm11, %ymm6
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],mem[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm13[0,1],mem[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3]
+; AVX1-NEXT:    vmovapd %ymm12, 64(%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm4, 96(%rsi)
+; AVX1-NEXT:    vmovups (%rsp), %ymm4 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm4, (%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm4, 32(%rsi)
+; AVX1-NEXT:    vmovapd %ymm0, 96(%rdx)
+; AVX1-NEXT:    vmovapd %ymm9, (%rdx)
+; AVX1-NEXT:    vmovapd %ymm14, 32(%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 64(%rdx)
+; AVX1-NEXT:    vmovapd %ymm1, 64(%rcx)
+; AVX1-NEXT:    vmovapd %ymm5, 96(%rcx)
+; AVX1-NEXT:    vmovapd %ymm2, (%rcx)
+; AVX1-NEXT:    vmovapd %ymm3, 32(%rcx)
+; AVX1-NEXT:    addq $72, %rsp
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
index 108dfff2c60fe..f2a8343272b36 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
@@ -249,10 +249,9 @@ define void @load_i64_stride6_vf4(<24 x i64>* %in.vec, <4 x i64>* %out.vec0, <4
 ; AVX512-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = <10,0,6,u>
 ; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512-NEXT:    vmovdqa 160(%rdi), %xmm6
-; AVX512-NEXT:    vpbroadcastq %xmm6, %ymm7
-; AVX512-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm7[6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512-NEXT:    vpbroadcastq 160(%rdi), %ymm6
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
+; AVX512-NEXT:    vinserti128 $1, 160(%rdi), %ymm0, %ymm6
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm7 = <11,1,7,u>
 ; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm7
 ; AVX512-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7]

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
index d86d11430ab83..4a07232cb6a21 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -2,8 +2,8 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST-ALL
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST-PERLANE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST,AVX2-FAST-ALL
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST,AVX2-FAST-PERLANE
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
@@ -74,12 +74,12 @@ define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.
 ; SSE-NEXT:    pandn %xmm2, %xmm3
 ; SSE-NEXT:    por %xmm4, %xmm3
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,0,0,65535,65535,65535,65535,65535]
+; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,3,1,3,4,5,6,7]
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm2
-; SSE-NEXT:    por %xmm0, %xmm2
+; SSE-NEXT:    pandn %xmm0, %xmm2
+; SSE-NEXT:    por %xmm1, %xmm2
 ; SSE-NEXT:    movq %xmm2, 16(%rcx)
 ; SSE-NEXT:    movdqa %xmm3, (%rcx)
 ; SSE-NEXT:    retq
@@ -149,42 +149,42 @@ define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.
 define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.vecptr2, <24 x i16>* %out.vec) nounwind {
 ; SSE-LABEL: vf8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm0
+; SSE-NEXT:    movdqa (%rdi), %xmm3
 ; SSE-NEXT:    movdqa (%rsi), %xmm2
-; SSE-NEXT:    movdqa (%rdx), %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,0,0,0]
-; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT:    movdqa %xmm4, %xmm5
-; SSE-NEXT:    pandn %xmm1, %xmm5
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,5]
-; SSE-NEXT:    pand %xmm4, %xmm1
-; SSE-NEXT:    por %xmm5, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,2,2]
-; SSE-NEXT:    pand %xmm4, %xmm5
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm2[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pandn %xmm6, %xmm4
-; SSE-NEXT:    por %xmm5, %xmm4
-; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT:    pand %xmm5, %xmm4
+; SSE-NEXT:    movdqa (%rdx), %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,0,0]
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT:    movdqa %xmm0, %xmm5
+; SSE-NEXT:    pandn %xmm4, %xmm5
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[3,3,3,3]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5]
+; SSE-NEXT:    pand %xmm0, %xmm3
+; SSE-NEXT:    por %xmm5, %xmm3
+; SSE-NEXT:    movdqa %xmm0, %xmm5
+; SSE-NEXT:    pandn %xmm4, %xmm5
+; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[2,3,2,0,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
+; SSE-NEXT:    pand %xmm0, %xmm2
+; SSE-NEXT:    por %xmm5, %xmm2
+; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
+; SSE-NEXT:    pand %xmm5, %xmm1
 ; SSE-NEXT:    pandn %xmm6, %xmm5
-; SSE-NEXT:    por %xmm4, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,1,3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,1,1,0,4,5,6,7]
-; SSE-NEXT:    pand %xmm4, %xmm0
-; SSE-NEXT:    pandn %xmm3, %xmm4
-; SSE-NEXT:    por %xmm0, %xmm4
-; SSE-NEXT:    movdqa %xmm4, 32(%rcx)
-; SSE-NEXT:    movdqa %xmm5, 16(%rcx)
-; SSE-NEXT:    movdqa %xmm1, (%rcx)
+; SSE-NEXT:    por %xmm1, %xmm5
+; SSE-NEXT:    pand %xmm0, %xmm5
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    movdqa %xmm0, 16(%rcx)
+; SSE-NEXT:    movdqa %xmm2, 32(%rcx)
+; SSE-NEXT:    movdqa %xmm3, (%rcx)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf8:
@@ -308,146 +308,133 @@ define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.
 define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>* %in.vecptr2, <48 x i16>* %out.vec) nounwind {
 ; SSE-LABEL: vf16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm8
-; SSE-NEXT:    movdqa 16(%rdi), %xmm10
-; SSE-NEXT:    movdqa (%rsi), %xmm12
-; SSE-NEXT:    movdqa 16(%rsi), %xmm7
+; SSE-NEXT:    movdqa (%rdi), %xmm4
+; SSE-NEXT:    movdqa 16(%rdi), %xmm7
+; SSE-NEXT:    movdqa (%rsi), %xmm3
+; SSE-NEXT:    movdqa 16(%rsi), %xmm5
 ; SSE-NEXT:    movdqa (%rdx), %xmm9
-; SSE-NEXT:    movdqa 16(%rdx), %xmm11
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm10[1,1,2,2]
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT:    pand %xmm2, %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm7[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm2, %xmm0
-; SSE-NEXT:    pandn %xmm3, %xmm0
-; SSE-NEXT:    por %xmm1, %xmm0
-; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [0,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT:    pand %xmm3, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm11[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm3, %xmm5
-; SSE-NEXT:    pandn %xmm1, %xmm5
+; SSE-NEXT:    movdqa 16(%rdx), %xmm10
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm7[3,3,3,3]
+; SSE-NEXT:    movdqa {{.*#+}} xmm14 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT:    movdqa %xmm14, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[1,1,2,2]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm11 = xmm5[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[2,3,2,0,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,7]
+; SSE-NEXT:    pand %xmm14, %xmm5
 ; SSE-NEXT:    por %xmm0, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    pandn %xmm0, %xmm4
-; SSE-NEXT:    movdqa %xmm10, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,7,5,4,5]
-; SSE-NEXT:    pand %xmm2, %xmm1
-; SSE-NEXT:    por %xmm4, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm8[1,1,2,2]
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm12[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm2, %xmm6
-; SSE-NEXT:    pandn %xmm4, %xmm6
-; SSE-NEXT:    por %xmm0, %xmm6
-; SSE-NEXT:    pand %xmm3, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,2,2]
-; SSE-NEXT:    pandn %xmm0, %xmm3
-; SSE-NEXT:    por %xmm6, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm8, %xmm4
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm10[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm14, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm7[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,4,5]
+; SSE-NEXT:    pand %xmm14, %xmm2
+; SSE-NEXT:    por %xmm0, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm14, %xmm7
+; SSE-NEXT:    pandn %xmm0, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm12 = xmm4[1,1,2,2]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm13 = xmm3[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[2,3,2,0,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
+; SSE-NEXT:    pand %xmm14, %xmm3
+; SSE-NEXT:    por %xmm7, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm9[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm14, %xmm6
+; SSE-NEXT:    pandn %xmm7, %xmm6
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,2,2,4,5,6,7]
 ; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,4,5]
-; SSE-NEXT:    pand %xmm2, %xmm4
-; SSE-NEXT:    pandn %xmm0, %xmm2
-; SSE-NEXT:    por %xmm4, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[2,2,3,3]
-; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT:    movdqa %xmm4, %xmm6
-; SSE-NEXT:    pandn %xmm0, %xmm6
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[2,1,3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,1,1,0,4,5,6,7]
-; SSE-NEXT:    pand %xmm4, %xmm0
-; SSE-NEXT:    por %xmm6, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm9[2,2,3,3]
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm12[2,1,3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm7[2,1,1,0,4,5,6,7]
-; SSE-NEXT:    pand %xmm4, %xmm7
-; SSE-NEXT:    pandn %xmm6, %xmm4
-; SSE-NEXT:    por %xmm7, %xmm4
-; SSE-NEXT:    movdqa %xmm4, 32(%rcx)
-; SSE-NEXT:    movdqa %xmm0, 80(%rcx)
-; SSE-NEXT:    movdqa %xmm2, (%rcx)
-; SSE-NEXT:    movdqa %xmm3, 16(%rcx)
-; SSE-NEXT:    movdqa %xmm1, 48(%rcx)
-; SSE-NEXT:    movdqa %xmm5, 64(%rcx)
+; SSE-NEXT:    pand %xmm14, %xmm4
+; SSE-NEXT:    por %xmm6, %xmm4
+; SSE-NEXT:    movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT:    movdqa %xmm6, %xmm7
+; SSE-NEXT:    pandn %xmm8, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,2,2]
+; SSE-NEXT:    pand %xmm6, %xmm0
+; SSE-NEXT:    por %xmm7, %xmm0
+; SSE-NEXT:    pand %xmm14, %xmm0
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm11[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm14, %xmm1
+; SSE-NEXT:    pandn %xmm7, %xmm1
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,2,2]
+; SSE-NEXT:    pand %xmm6, %xmm0
+; SSE-NEXT:    pandn %xmm12, %xmm6
+; SSE-NEXT:    por %xmm0, %xmm6
+; SSE-NEXT:    pand %xmm14, %xmm6
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pandn %xmm0, %xmm14
+; SSE-NEXT:    por %xmm6, %xmm14
+; SSE-NEXT:    movdqa %xmm14, 16(%rcx)
+; SSE-NEXT:    movdqa %xmm1, 64(%rcx)
+; SSE-NEXT:    movdqa %xmm4, (%rcx)
+; SSE-NEXT:    movdqa %xmm3, 32(%rcx)
+; SSE-NEXT:    movdqa %xmm2, 48(%rcx)
+; SSE-NEXT:    movdqa %xmm5, 80(%rcx)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,2]
 ; AVX1-NEXT:    vmovdqa (%rsi), %xmm3
 ; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm4
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[3,3,3,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3,4],xmm5[5],xmm2[6,7]
 ; AVX1-NEXT:    vmovdqa (%rdx), %xmm5
 ; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm6
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm7[0],xmm2[1,2],xmm7[3],xmm2[4,5],xmm7[6],xmm2[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1],xmm2[2],xmm7[3,4],xmm2[5],xmm7[6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm10 = xmm7[0],xmm2[1,2],xmm7[3],xmm2[4,5],xmm7[6],xmm2[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
-; AVX1-NEXT:    vpshufb %xmm9, %xmm7, %xmm7
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm11 = xmm7[0],xmm2[1],xmm7[2,3],xmm2[4],xmm7[5,6],xmm2[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX1-NEXT:    vpshufb %xmm12, %xmm7, %xmm7
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[0,0,0,0]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1],xmm2[2],xmm7[3,4],xmm2[5],xmm7[6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX1-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[0,0,0,0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm7[0],xmm2[1,2],xmm7[3],xmm2[4,5],xmm7[6],xmm2[7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,0,1,2,3,u,u,4,5,6,7,u,u,8,9]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm9 = xmm2[0],xmm7[1,2],xmm2[3],xmm7[4,5],xmm2[6],xmm7[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[4,5,6,7,u,u,8,9,10,11,u,u,12,13,14,15]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm10 = xmm7[0,1],xmm2[2],xmm7[3,4],xmm2[5],xmm7[6,7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,5,10,11,u,u,8,9,14,15,u,u,12,13]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,5,6,7,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1,2],xmm2[3],xmm7[4,5],xmm2[6],xmm7[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2,3],xmm1[4],xmm6[5,6],xmm1[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX1-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6],xmm3[7]
-; AVX1-NEXT:    vmovdqa %xmm0, 32(%rcx)
-; AVX1-NEXT:    vmovdqa %xmm1, 48(%rcx)
-; AVX1-NEXT:    vmovdqa %xmm2, (%rcx)
-; AVX1-NEXT:    vmovdqa %xmm11, 80(%rcx)
-; AVX1-NEXT:    vmovdqa %xmm10, 16(%rcx)
-; AVX1-NEXT:    vmovdqa %xmm8, 64(%rcx)
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,0,0,0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7]
+; AVX1-NEXT:    vmovdqa %xmm0, (%rcx)
+; AVX1-NEXT:    vmovdqa %xmm1, 64(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm2, 80(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm10, 32(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm9, 48(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm8, 16(%rcx)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: vf16:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,2,2]
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX2-SLOW-NEXT:    vmovdqa 16(%rsi), %xmm6
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[3,3,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3,4],xmm7[5],xmm4[6,7]
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <5,5,u,6,6,u,7,7>
-; AVX2-SLOW-NEXT:    vpermd %ymm0, %ymm4, %ymm4
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm3, %ymm4, %ymm3
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[3,3,3,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
@@ -455,111 +442,73 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,0,0,u,1,1,u,2>
-; AVX2-SLOW-NEXT:    vpermd %ymm0, %ymm4, %ymm4
+; AVX2-SLOW-NEXT:    vpermd %ymm1, %ymm4, %ymm4
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
 ; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,3,3,u,4,4,u,5>
-; AVX2-SLOW-NEXT:    vpermd (%rdi), %ymm4, %ymm4
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm1, %ymm4, %ymm1
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,2,2]
+; AVX2-SLOW-NEXT:    vmovdqa 16(%rdx), %xmm6
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,u,3,3,u,4,4,u>
-; AVX2-SLOW-NEXT:    vpermd %ymm0, %ymm4, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm4, %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpermd %ymm1, %ymm4, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,3,3,u,4,4,u,5>
+; AVX2-SLOW-NEXT:    vpermd (%rdi), %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 32(%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%rcx)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 64(%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%rcx)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
-; AVX2-FAST-ALL-LABEL: vf16:
-; AVX2-FAST-ALL:       # %bb.0:
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm8
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa 16(%rsi), %xmm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,6,7,u,u,u,u,8,9,u,u,u,u>
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm4, %xmm3, %xmm5
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm6
-; AVX2-FAST-ALL-NEXT:    vmovdqa 16(%rdi), %xmm7
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2],xmm1[3,4],xmm5[5],xmm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = <5,5,u,6,6,u,7,7>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm3, %ymm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm5, %ymm1, %ymm3, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm4, %xmm2, %xmm3
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,0,0,u,1,1,u,2>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm3, %ymm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,3,3,u,4,4,u,5>
-; AVX2-FAST-ALL-NEXT:    vpermd (%rdi), %ymm3, %ymm3
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm4 = ymm8[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,u,3,3,u,4,4,u>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm4, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, 32(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm2, (%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm1, 64(%rcx)
-; AVX2-FAST-ALL-NEXT:    vzeroupper
-; AVX2-FAST-ALL-NEXT:    retq
-;
-; AVX2-FAST-PERLANE-LABEL: vf16:
-; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 16(%rsi), %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,6,7,u,u,u,u,8,9,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm3, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 16(%rdi), %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2],xmm1[3,4],xmm5[5],xmm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = <5,5,u,6,6,u,7,7>
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm0, %ymm3, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm1, %ymm3, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm2, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,0,0,u,1,1,u,2>
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm0, %ymm3, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,3,3,u,4,4,u,5>
-; AVX2-FAST-PERLANE-NEXT:    vpermd (%rdi), %ymm3, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm8[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,u,3,3,u,4,4,u>
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm0, %ymm4, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 32(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 64(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vzeroupper
-; AVX2-FAST-PERLANE-NEXT:    retq
+; AVX2-FAST-LABEL: vf16:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,6,7,u,u,u,u,8,9,u,u,u,u]
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm5
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[1,1,2,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2],xmm6[3,4],xmm3[5],xmm6[6,7]
+; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,0,0,u,1,1,u,2>
+; AVX2-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[1,1,2,2]
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdx), %xmm6
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2,3],xmm3[4],xmm7[5,6],xmm3[7]
+; AVX2-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm3, %ymm5, %ymm3
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <2,u,3,3,u,4,4,u>
+; AVX2-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm1
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,3,3,u,4,4,u,5>
+; AVX2-FAST-NEXT:    vpermd (%rdi), %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm3, 64(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, (%rcx)
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
 ;
 ; AVX512-LABEL: vf16:
 ; AVX512:       # %bb.0:
@@ -591,496 +540,426 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
 define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>* %in.vecptr2, <96 x i16>* %out.vec) nounwind {
 ; SSE-LABEL: vf32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa 16(%rdi), %xmm8
-; SSE-NEXT:    movdqa 32(%rdi), %xmm11
-; SSE-NEXT:    movdqa 48(%rdi), %xmm0
-; SSE-NEXT:    movdqa 16(%rsi), %xmm7
-; SSE-NEXT:    movdqa 32(%rsi), %xmm6
-; SSE-NEXT:    movdqa 48(%rsi), %xmm10
+; SSE-NEXT:    movdqa 16(%rdi), %xmm5
+; SSE-NEXT:    movdqa 32(%rdi), %xmm4
+; SSE-NEXT:    movdqa 48(%rdi), %xmm15
+; SSE-NEXT:    movdqa 16(%rsi), %xmm8
+; SSE-NEXT:    movdqa 32(%rsi), %xmm7
+; SSE-NEXT:    movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 48(%rsi), %xmm13
+; SSE-NEXT:    movdqa 16(%rdx), %xmm6
 ; SSE-NEXT:    movdqa 32(%rdx), %xmm9
-; SSE-NEXT:    movdqa 48(%rdx), %xmm4
-; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm0, %xmm12
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT:    pand %xmm5, %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm10[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm5, %xmm3
+; SSE-NEXT:    movdqa 48(%rdx), %xmm12
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm15[3,3,3,3]
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pandn %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm15, %xmm1
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3]
+; SSE-NEXT:    movdqa %xmm13, %xmm3
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[2,3,2,0,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
+; SSE-NEXT:    pand %xmm0, %xmm3
+; SSE-NEXT:    por %xmm2, %xmm3
+; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm12[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
 ; SSE-NEXT:    pandn %xmm2, %xmm3
-; SSE-NEXT:    por %xmm1, %xmm3
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT:    pand %xmm2, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm2, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
-; SSE-NEXT:    por %xmm3, %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm5, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm3
-; SSE-NEXT:    movdqa %xmm12, %xmm1
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm15 = xmm1[0,1,2,3,7,5,4,5]
-; SSE-NEXT:    pand %xmm5, %xmm15
-; SSE-NEXT:    por %xmm3, %xmm15
-; SSE-NEXT:    movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm11[1,1,2,2]
-; SSE-NEXT:    pand %xmm5, %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm6[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm5, %xmm4
-; SSE-NEXT:    pandn %xmm3, %xmm4
-; SSE-NEXT:    por %xmm1, %xmm4
-; SSE-NEXT:    pand %xmm2, %xmm4
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,5]
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    por %xmm3, %xmm1
+; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pandn %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm4, %xmm1
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; SSE-NEXT:    movdqa %xmm7, %xmm3
 ; SSE-NEXT:    movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm9[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm2, %xmm13
-; SSE-NEXT:    pandn %xmm1, %xmm13
-; SSE-NEXT:    por %xmm4, %xmm13
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm9[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm5, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm3
-; SSE-NEXT:    movdqa %xmm11, %xmm1
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[2,3,2,0,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm11 = xmm3[0,1,2,3,5,5,6,7]
+; SSE-NEXT:    pand %xmm0, %xmm11
+; SSE-NEXT:    por %xmm2, %xmm11
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm9[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    pandn %xmm2, %xmm3
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm9 = xmm1[0,1,2,3,7,5,4,5]
-; SSE-NEXT:    pand %xmm5, %xmm9
-; SSE-NEXT:    por %xmm3, %xmm9
-; SSE-NEXT:    movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm7[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm5, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm3
-; SSE-NEXT:    movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm8[1,1,2,2]
-; SSE-NEXT:    pand %xmm5, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm3
-; SSE-NEXT:    movdqa 16(%rdx), %xmm11
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm11[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm2, %xmm14
-; SSE-NEXT:    pandn %xmm1, %xmm14
-; SSE-NEXT:    pand %xmm2, %xmm3
-; SSE-NEXT:    por %xmm3, %xmm14
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm11[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm5, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm3
+; SSE-NEXT:    pshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,7,5,4,5]
+; SSE-NEXT:    pand %xmm0, %xmm10
+; SSE-NEXT:    por %xmm3, %xmm10
+; SSE-NEXT:    movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pandn %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm8, %xmm3
 ; SSE-NEXT:    movdqa %xmm8, %xmm1
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; SSE-NEXT:    movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[2,3,2,0,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,5,5,6,7]
+; SSE-NEXT:    pand %xmm0, %xmm7
+; SSE-NEXT:    por %xmm2, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm6[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pandn %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm5, %xmm1
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,7,5,4,5]
-; SSE-NEXT:    pand %xmm5, %xmm4
-; SSE-NEXT:    por %xmm3, %xmm4
-; SSE-NEXT:    movdqa (%rsi), %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm5, %xmm0
-; SSE-NEXT:    pandn %xmm3, %xmm0
-; SSE-NEXT:    movdqa (%rdi), %xmm8
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,7,5,4,5]
+; SSE-NEXT:    pand %xmm0, %xmm6
+; SSE-NEXT:    por %xmm2, %xmm6
+; SSE-NEXT:    movdqa (%rdi), %xmm9
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm9[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    pandn %xmm2, %xmm1
+; SSE-NEXT:    movdqa (%rsi), %xmm14
+; SSE-NEXT:    movdqa (%rdx), %xmm8
+; SSE-NEXT:    movdqa %xmm14, %xmm2
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[2,3,2,0,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,5,5,6,7]
+; SSE-NEXT:    pand %xmm0, %xmm5
+; SSE-NEXT:    por %xmm1, %xmm5
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm8[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    pandn %xmm2, %xmm1
+; SSE-NEXT:    movdqa %xmm9, %xmm2
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,7,5,4,5]
+; SSE-NEXT:    pand %xmm0, %xmm4
+; SSE-NEXT:    por %xmm1, %xmm4
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm15[1,1,2,2]
+; SSE-NEXT:    movdqa {{.*#+}} xmm15 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT:    movdqa %xmm15, %xmm1
+; SSE-NEXT:    pandn %xmm2, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm12[1,1,2,2]
+; SSE-NEXT:    pand %xmm15, %xmm2
+; SSE-NEXT:    por %xmm1, %xmm2
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm13[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm0, %xmm12
+; SSE-NEXT:    pandn %xmm1, %xmm12
+; SSE-NEXT:    pand %xmm0, %xmm2
+; SSE-NEXT:    por %xmm2, %xmm12
+; SSE-NEXT:    pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm1 = mem[1,1,2,2]
+; SSE-NEXT:    movdqa %xmm15, %xmm2
+; SSE-NEXT:    pandn %xmm1, %xmm2
+; SSE-NEXT:    pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm1 = mem[1,1,2,2]
+; SSE-NEXT:    pand %xmm15, %xmm1
+; SSE-NEXT:    por %xmm2, %xmm1
+; SSE-NEXT:    pshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm2 = mem[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm0, %xmm13
+; SSE-NEXT:    pandn %xmm2, %xmm13
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    por %xmm1, %xmm13
+; SSE-NEXT:    pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm1 = mem[1,1,2,2]
+; SSE-NEXT:    movdqa %xmm15, %xmm2
+; SSE-NEXT:    pandn %xmm1, %xmm2
+; SSE-NEXT:    pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm1 = mem[1,1,2,2]
+; SSE-NEXT:    pand %xmm15, %xmm1
+; SSE-NEXT:    por %xmm2, %xmm1
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm3[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pandn %xmm3, %xmm2
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    por %xmm1, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm9[1,1,2,2]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm8[1,1,2,2]
-; SSE-NEXT:    pand %xmm5, %xmm3
-; SSE-NEXT:    por %xmm3, %xmm0
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    movdqa (%rdx), %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm12 = xmm3[1,1,2,2]
-; SSE-NEXT:    pandn %xmm12, %xmm2
-; SSE-NEXT:    por %xmm0, %xmm2
-; SSE-NEXT:    movdqa %xmm8, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,4,5]
-; SSE-NEXT:    pand %xmm5, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[0,0,0,0]
-; SSE-NEXT:    pandn %xmm7, %xmm5
-; SSE-NEXT:    por %xmm0, %xmm5
-; SSE-NEXT:    pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm0 = mem[2,2,3,3]
-; SSE-NEXT:    movdqa {{.*#+}} xmm12 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT:    movdqa %xmm12, %xmm7
-; SSE-NEXT:    pandn %xmm0, %xmm7
-; SSE-NEXT:    punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm10 = xmm10[4],mem[4],xmm10[5],mem[5],xmm10[6],mem[6],xmm10[7],mem[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[2,1,3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm10 = xmm0[2,1,1,0,4,5,6,7]
-; SSE-NEXT:    pand %xmm12, %xmm10
-; SSE-NEXT:    por %xmm7, %xmm10
-; SSE-NEXT:    pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm0 = mem[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm12, %xmm7
-; SSE-NEXT:    pandn %xmm0, %xmm7
-; SSE-NEXT:    punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm6 = xmm6[4],mem[4],xmm6[5],mem[5],xmm6[6],mem[6],xmm6[7],mem[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[2,1,3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,1,1,0,4,5,6,7]
-; SSE-NEXT:    pand %xmm12, %xmm0
-; SSE-NEXT:    por %xmm7, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm11 = xmm11[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm12, %xmm7
-; SSE-NEXT:    pandn %xmm11, %xmm7
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm6 = xmm6[4],mem[4],xmm6[5],mem[5],xmm6[6],mem[6],xmm6[7],mem[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,1,3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[2,1,1,0,4,5,6,7]
-; SSE-NEXT:    pand %xmm12, %xmm6
-; SSE-NEXT:    por %xmm7, %xmm6
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[2,1,1,0,4,5,6,7]
-; SSE-NEXT:    pand %xmm12, %xmm1
-; SSE-NEXT:    pandn %xmm3, %xmm12
-; SSE-NEXT:    por %xmm1, %xmm12
-; SSE-NEXT:    movdqa %xmm12, 32(%rcx)
-; SSE-NEXT:    movdqa %xmm6, 80(%rcx)
-; SSE-NEXT:    movdqa %xmm0, 128(%rcx)
-; SSE-NEXT:    movdqa %xmm10, 176(%rcx)
-; SSE-NEXT:    movdqa %xmm5, (%rcx)
-; SSE-NEXT:    movdqa %xmm2, 16(%rcx)
-; SSE-NEXT:    movdqa %xmm4, 48(%rcx)
-; SSE-NEXT:    movdqa %xmm14, 64(%rcx)
-; SSE-NEXT:    movdqa %xmm9, 96(%rcx)
+; SSE-NEXT:    pand %xmm15, %xmm3
+; SSE-NEXT:    pandn %xmm1, %xmm15
+; SSE-NEXT:    por %xmm3, %xmm15
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm14[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pand %xmm0, %xmm15
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    por %xmm15, %xmm0
+; SSE-NEXT:    movdqa %xmm0, 16(%rcx)
+; SSE-NEXT:    movdqa %xmm2, 64(%rcx)
 ; SSE-NEXT:    movdqa %xmm13, 112(%rcx)
-; SSE-NEXT:    movdqa %xmm15, 144(%rcx)
+; SSE-NEXT:    movdqa %xmm12, 160(%rcx)
+; SSE-NEXT:    movdqa %xmm4, (%rcx)
+; SSE-NEXT:    movdqa %xmm5, 32(%rcx)
+; SSE-NEXT:    movdqa %xmm6, 48(%rcx)
+; SSE-NEXT:    movdqa %xmm7, 80(%rcx)
+; SSE-NEXT:    movdqa %xmm10, 96(%rcx)
+; SSE-NEXT:    movdqa %xmm11, 128(%rcx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 144(%rcx)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 160(%rcx)
+; SSE-NEXT:    movaps %xmm0, 176(%rcx)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm12
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm15
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm9
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm4[1,1,2,2]
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm13
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm8
-; AVX1-NEXT:    vmovdqa 32(%rsi), %xmm6
-; AVX1-NEXT:    vmovdqa 48(%rsi), %xmm1
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm1[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7]
-; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm11
-; AVX1-NEXT:    vmovdqa 32(%rdx), %xmm2
-; AVX1-NEXT:    vmovdqa 48(%rdx), %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1,2],xmm7[3],xmm3[4,5],xmm7[6],xmm3[7]
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2],xmm3[3,4],xmm7[5],xmm3[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1,2],xmm7[3],xmm3[4,5],xmm7[6],xmm3[7]
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm15[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2],xmm3[3,4],xmm7[5],xmm3[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm11[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1,2],xmm7[3],xmm3[4,5],xmm7[6],xmm3[7]
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm12[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm13[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm3[0,1],xmm7[2],xmm3[3,4],xmm7[5],xmm3[6,7]
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm7[1,2],xmm5[3],xmm7[4,5],xmm5[6],xmm7[7]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm13
+; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm7
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm7[1,1,2,2]
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm14
+; AVX1-NEXT:    vmovdqa 32(%rsi), %xmm3
+; AVX1-NEXT:    vmovdqa 48(%rsi), %xmm6
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm15
+; AVX1-NEXT:    vmovdqa 32(%rdx), %xmm5
+; AVX1-NEXT:    vmovdqa 48(%rdx), %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1,2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7]
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm13[1,1,2,2]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm14[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3,4],xmm4[5],xmm0[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm15[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1,2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7]
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm6[0,1,2,3,5,6,7,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1,2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7]
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1],xmm4[2,3],xmm0[4],xmm4[5,6],xmm0[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm6[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm12 = xmm0[0,1],xmm4[2],xmm0[3,4],xmm4[5],xmm0[6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,0,1,2,3,6,7,4,5,6,7,4,5,8,9]
+; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm11 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [4,5,6,7,4,5,8,9,10,11,10,11,12,13,14,15]
+; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm7[3,3,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm0[0,1],xmm6[2],xmm0[3,4],xmm6[5],xmm0[6,7]
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm0
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX1-NEXT:    vpshufb %xmm10, %xmm7, %xmm7
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[0,0,0,0]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2],xmm7[3,4],xmm5[5],xmm7[6,7]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
-; AVX1-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm14 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm14[1],xmm5[2,3],xmm14[4],xmm5[5,6],xmm14[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX1-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4],xmm1[5,6],xmm0[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
-; AVX1-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm8[4],xmm15[4],xmm8[5],xmm15[5],xmm8[6],xmm15[6],xmm8[7],xmm15[7]
-; AVX1-NEXT:    vpshufb %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm11[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6],xmm4[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX1-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm11[0,0,0,0]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX1-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5,6],xmm7[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
-; AVX1-NEXT:    vpshufb %xmm10, %xmm7, %xmm7
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1],xmm3[2],xmm7[3,4],xmm3[5],xmm7[6,7]
-; AVX1-NEXT:    vmovdqa %xmm3, (%rcx)
-; AVX1-NEXT:    vmovdqa %xmm6, 32(%rcx)
-; AVX1-NEXT:    vmovdqa %xmm4, 48(%rcx)
-; AVX1-NEXT:    vmovdqa %xmm2, 80(%rcx)
-; AVX1-NEXT:    vmovdqa %xmm1, 96(%rcx)
-; AVX1-NEXT:    vmovdqa %xmm0, 176(%rcx)
-; AVX1-NEXT:    vmovdqa %xmm5, 128(%rcx)
+; AVX1-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,0,0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3,4],xmm5[5],xmm3[6,7]
+; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm5
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX1-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[0,0,1,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5],xmm9[6],xmm8[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
+; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm13[3,3,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,5,6,7,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,2,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1,2],xmm7[3],xmm4[4,5],xmm7[6],xmm4[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3],xmm2[4],xmm5[5,6],xmm2[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3]
+; AVX1-NEXT:    vpshufb %xmm10, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm15[0,0,0,0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3,4],xmm5[5],xmm2[6,7]
+; AVX1-NEXT:    vmovdqa %xmm2, (%rcx)
+; AVX1-NEXT:    vmovdqa %xmm0, 64(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm4, 80(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm1, 32(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm8, 48(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm3, 96(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm6, 128(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm11, 144(%rcx)
+; AVX1-NEXT:    vmovdqa %xmm12, 160(%rcx)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 144(%rcx)
+; AVX1-NEXT:    vmovaps %xmm0, 176(%rcx)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-NEXT:    vmovaps %xmm0, 16(%rcx)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 64(%rcx)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-NEXT:    vmovaps %xmm0, 112(%rcx)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 160(%rcx)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: vf32:
 ; AVX2-SLOW:       # %bb.0:
 ; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm9
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm11
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm14
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX2-SLOW-NEXT:    vmovdqa 48(%rsi), %xmm5
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm6, %xmm6
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,2]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[3,3,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3,4],xmm5[5],xmm3[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm3, %ymm3
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <5,5,u,6,6,u,7,7>
-; AVX2-SLOW-NEXT:    vpermd %ymm2, %ymm13, %ymm6
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm8, %ymm3, %ymm6, %ymm10
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm6
-; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm3
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm4
-; AVX2-SLOW-NEXT:    vmovdqa 16(%rsi), %xmm7
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm5
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm3
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm11
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm6
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm1
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[3,3,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1],xmm2[2],xmm7[3,4],xmm2[5],xmm7[6,7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,3,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm7[2],xmm1[3,4],xmm7[5],xmm1[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpermd %ymm14, %ymm13, %ymm1
-; AVX2-SLOW-NEXT:    vpblendvb %ymm8, %ymm0, %ymm1, %ymm12
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm6[1,1,2,2]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm4[3,3,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,0,0,u,1,1,u,2>
+; AVX2-SLOW-NEXT:    vpermd %ymm10, %ymm13, %ymm7
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm0, %ymm7, %ymm8
+; AVX2-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa 48(%rdx), %xmm7
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = [8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13]
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2,3],xmm0[4],xmm7[5,6],xmm0[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31>
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm3, %ymm7
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm0, %ymm7, %ymm15
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm4[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[3,3,3,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2],xmm0[3,4],xmm7[5],xmm0[6,7]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm4, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,0,0,u,1,1,u,2>
-; AVX2-SLOW-NEXT:    vpermd %ymm14, %ymm4, %ymm7
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm8, %ymm0, %ymm7, %ymm0
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[1,1,2,2]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[3,3,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1],xmm1[2],xmm7[3,4],xmm1[5],xmm7[6,7]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm3, %ymm1
-; AVX2-SLOW-NEXT:    vpermd %ymm2, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vpblendvb %ymm8, %ymm1, %ymm3, %ymm1
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
-; AVX2-SLOW-NEXT:    vpshufb %ymm3, %ymm11, %ymm4
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,3,3,u,4,4,u,5>
-; AVX2-SLOW-NEXT:    vpermd 32(%rdi), %ymm5, %ymm6
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm4, %ymm6, %ymm4
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <2,u,3,3,u,4,4,u>
-; AVX2-SLOW-NEXT:    vpermd %ymm2, %ymm6, %ymm2
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm8, %ymm4, %ymm2, %ymm2
-; AVX2-SLOW-NEXT:    vpshufb %ymm3, %ymm9, %ymm3
-; AVX2-SLOW-NEXT:    vpermd (%rdi), %ymm5, %ymm4
-; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm3, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vpermd %ymm14, %ymm6, %ymm4
-; AVX2-SLOW-NEXT:    vpblendvb %ymm8, %ymm3, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 32(%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 128(%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 96(%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm12, 64(%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm10, 160(%rcx)
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpermd %ymm11, %ymm13, %ymm1
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa 16(%rdx), %xmm1
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm4
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3],xmm6[4],xmm1[5,6],xmm6[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm9, %ymm2
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm3, %ymm3
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,u,3,3,u,4,4,u>
+; AVX2-SLOW-NEXT:    vpermd %ymm11, %ymm4, %ymm6
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm3, %ymm6, %ymm3
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,3,3,u,4,4,u,5>
+; AVX2-SLOW-NEXT:    vpermd 32(%rdi), %ymm6, %ymm11
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm3, %ymm11, %ymm3
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm9, %ymm2
+; AVX2-SLOW-NEXT:    vpermd %ymm10, %ymm4, %ymm4
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm2, %ymm4, %ymm2
+; AVX2-SLOW-NEXT:    vpermd (%rdi), %ymm6, %ymm4
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 32(%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 128(%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 64(%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 96(%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm15, 160(%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm8, (%rcx)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
-; AVX2-FAST-ALL-LABEL: vf32:
-; AVX2-FAST-ALL:       # %bb.0:
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %xmm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,6,7,u,u,u,u,8,9,u,u,u,u>
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm2, %xmm1, %xmm4
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %xmm6
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2],xmm7[3,4],xmm4[5],xmm7[6,7]
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm4, %ymm1, %ymm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,0,0,u,1,1,u,2>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm11, %ymm7
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm1, %ymm7, %ymm9
-; AVX2-FAST-ALL-NEXT:    vmovdqa 48(%rdi), %xmm7
-; AVX2-FAST-ALL-NEXT:    vmovdqa 48(%rsi), %xmm1
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm12, %xmm5, %xmm5
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1],xmm1[2],xmm7[3,4],xmm1[5],xmm7[6,7]
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm2, %xmm3, %xmm5
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2],xmm7[3,4],xmm5[5],xmm7[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 16(%rsi), %xmm6
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm5, %ymm3, %ymm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = <5,5,u,6,6,u,7,7>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm7, %ymm11, %ymm10
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm3, %ymm10, %ymm11
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm7, %ymm5, %ymm8
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm1, %ymm8, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %ymm6
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2],xmm4[3,4],xmm2[5],xmm4[6,7]
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm5, %ymm3
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm3, %ymm6, %ymm4
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,3,3,u,4,4,u,5>
-; AVX2-FAST-ALL-NEXT:    vpermd 32(%rdi), %ymm5, %ymm6
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm8 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm4, %ymm6, %ymm4
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm6
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm3, %ymm6, %ymm3
-; AVX2-FAST-ALL-NEXT:    vpermd (%rdi), %ymm5, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm3, %ymm5, %ymm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = <2,u,3,3,u,4,4,u>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm7, %ymm5, %ymm6
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm4, %ymm6, %ymm4
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm5, %ymm0
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, 32(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm4, 128(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm2, 64(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm11, 96(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm1, 160(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm9, (%rcx)
-; AVX2-FAST-ALL-NEXT:    vzeroupper
-; AVX2-FAST-ALL-NEXT:    retq
-;
-; AVX2-FAST-PERLANE-LABEL: vf32:
-; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,6,7,u,u,u,u,8,9,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm1, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2],xmm7[3,4],xmm4[5],xmm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm4, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,0,0,u,1,1,u,2>
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm0, %ymm11, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm1, %ymm7, %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 48(%rdi), %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 48(%rsi), %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm5, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1],xmm1[2],xmm7[3,4],xmm1[5],xmm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm3, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2],xmm7[3,4],xmm5[5],xmm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 16(%rsi), %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm5, %ymm3, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = <5,5,u,6,6,u,7,7>
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm7, %ymm11, %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm3, %ymm10, %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm7, %ymm5, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm1, %ymm8, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2],xmm4[3,4],xmm2[5],xmm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm0, %ymm5, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = [10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm3, %ymm6, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,3,3,u,4,4,u,5>
-; AVX2-FAST-PERLANE-NEXT:    vpermd 32(%rdi), %ymm5, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm8 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm4, %ymm6, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm3, %ymm6, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpermd (%rdi), %ymm5, %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm3, %ymm5, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = <2,u,3,3,u,4,4,u>
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm7, %ymm5, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm4, %ymm6, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm0, %ymm5, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 32(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, 128(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, 64(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm11, 96(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 160(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm9, (%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vzeroupper
-; AVX2-FAST-PERLANE-NEXT:    retq
+; AVX2-FAST-LABEL: vf32:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovdqa (%rsi), %ymm10
+; AVX2-FAST-NEXT:    vmovdqa 32(%rsi), %ymm3
+; AVX2-FAST-NEXT:    vmovdqa 32(%rdx), %ymm12
+; AVX2-FAST-NEXT:    vmovdqa (%rdx), %ymm11
+; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX2-FAST-NEXT:    vmovdqa 32(%rsi), %xmm4
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,6,7,u,u,u,u,8,9,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm7
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm6
+; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2],xmm2[3,4],xmm7[5],xmm2[6,7]
+; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,0,0,u,1,1,u,2>
+; AVX2-FAST-NEXT:    vpermd %ymm11, %ymm14, %ymm7
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm7, %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
+; AVX2-FAST-NEXT:    vmovdqa 48(%rdx), %xmm7
+; AVX2-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13]
+; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2,3],xmm0[4],xmm7[5,6],xmm0[7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31>
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm7
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm7, %ymm8
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm0
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[1,1,2,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1],xmm0[2],xmm7[3,4],xmm0[5],xmm7[6,7]
+; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vpermd %ymm12, %ymm14, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdx), %xmm1
+; AVX2-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3],xmm6[4],xmm1[5,6],xmm6[7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm2
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,u,3,3,u,4,4,u>
+; AVX2-FAST-NEXT:    vpermd %ymm12, %ymm4, %ymm6
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm3, %ymm6, %ymm3
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,3,3,u,4,4,u,5>
+; AVX2-FAST-NEXT:    vpermd 32(%rdi), %ymm6, %ymm9
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm3, %ymm9, %ymm3
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm2
+; AVX2-FAST-NEXT:    vpermd %ymm11, %ymm4, %ymm4
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT:    vpermd (%rdi), %ymm6, %ymm4
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, 32(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm3, 128(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm1, 64(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, 96(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm8, 160(%rcx)
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovaps %ymm0, (%rcx)
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
 ;
 ; AVX512-LABEL: vf32:
 ; AVX512:       # %bb.0:
@@ -1091,15 +970,15 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
 ; AVX512-NEXT:    vpermi2w %zmm1, %zmm0, %zmm3
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,32,3,4,33,6,7,34,9,10,35,12,13,36,15,16,37,18,19,38,21,22,39,24,25,40,27,28,41,30,31]
 ; AVX512-NEXT:    vpermi2w %zmm2, %zmm3, %zmm4
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,11,43,u,12,44,u,13,45,u,14,46,u,15,47,u,16,48,u,17,49,u,18,50,u,19,51,u,20,52,u,21>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm0, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [42,1,2,43,4,5,44,7,8,45,10,11,46,13,14,47,16,17,48,19,20,49,22,23,50,25,26,51,28,29,52,31]
-; AVX512-NEXT:    vpermi2w %zmm2, %zmm3, %zmm5
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <21,u,54,22,u,55,23,u,56,24,u,57,25,u,58,26,u,59,27,u,60,28,u,61,29,u,62,30,u,63,31,u>
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm1, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,53,2,3,54,5,6,55,8,9,56,11,12,57,14,15,58,17,18,59,20,21,60,23,24,61,26,27,62,29,30,63]
-; AVX512-NEXT:    vpermi2w %zmm2, %zmm3, %zmm0
-; AVX512-NEXT:    vmovdqu64 %zmm0, 128(%rcx)
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <10,43,u,11,44,u,12,45,u,13,46,u,14,47,u,15,48,u,16,49,u,17,50,u,18,51,u,19,52,u,20,53>
+; AVX512-NEXT:    vpermi2w %zmm0, %zmm2, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,43,3,4,44,6,7,45,9,10,46,12,13,47,15,16,48,18,19,49,21,22,50,24,25,51,27,28,52,30,31]
+; AVX512-NEXT:    vpermi2w %zmm1, %zmm3, %zmm5
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <21,53,u,22,54,u,23,55,u,24,56,u,25,57,u,26,58,u,27,59,u,28,60,u,29,61,u,30,62,u,31,63>
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,54,3,4,55,6,7,56,9,10,57,12,13,58,15,16,59,18,19,60,21,22,61,24,25,62,27,28,63,30,31]
+; AVX512-NEXT:    vpermi2w %zmm0, %zmm3, %zmm1
+; AVX512-NEXT:    vmovdqu64 %zmm1, 128(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm5, 64(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm4, (%rcx)
 ; AVX512-NEXT:    vzeroupper

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
index 175748c40f8bf..f911ff1090a00 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
@@ -63,15 +63,22 @@ define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT:    punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE-NEXT:    movdqa %xmm0, 16(%r8)
-; SSE-NEXT:    movdqa %xmm2, (%r8)
+; SSE-NEXT:    movq {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT:    movdqa %xmm0, %xmm4
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm1[0]
+; SSE-NEXT:    movdqa %xmm2, %xmm5
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm3[0]
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[2,0,3,1,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[2,0,3,1,4,5,6,7]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT:    movdqa %xmm0, (%r8)
+; SSE-NEXT:    movdqa %xmm4, 16(%r8)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf4:
@@ -317,107 +324,107 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
 ;
 ; AVX1-LABEL: vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rcx), %xmm8
-; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm5
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm9
-; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm6
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX1-NEXT:    vmovdqa (%rcx), %xmm5
+; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm8
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm9
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm3
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm7
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm7
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm3
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm4
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm1[0],zero,xmm1[1],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm10, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[0,0,1,1]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm10, %ymm1
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm3[0],zero,xmm3[1],zero
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm10, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm10, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm5, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
-; AVX1-NEXT:    vmovaps %ymm2, (%r8)
-; AVX1-NEXT:    vmovaps %ymm0, 96(%r8)
-; AVX1-NEXT:    vmovaps %ymm1, 64(%r8)
-; AVX1-NEXT:    vmovaps %ymm11, 32(%r8)
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
+; AVX1-NEXT:    vmovaps %ymm2, 96(%r8)
+; AVX1-NEXT:    vmovaps %ymm0, (%r8)
+; AVX1-NEXT:    vmovaps %ymm1, 32(%r8)
+; AVX1-NEXT:    vmovaps %ymm11, 64(%r8)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vf16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rcx), %xmm8
-; AVX2-NEXT:    vmovdqa 16(%rcx), %xmm5
-; AVX2-NEXT:    vmovdqa (%rdx), %xmm9
-; AVX2-NEXT:    vmovdqa 16(%rdx), %xmm6
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX2-NEXT:    vmovdqa (%rcx), %xmm5
+; AVX2-NEXT:    vmovdqa 16(%rcx), %xmm8
+; AVX2-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX2-NEXT:    vmovdqa 16(%rdx), %xmm9
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm3
-; AVX2-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX2-NEXT:    vmovdqa 16(%rsi), %xmm7
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX2-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-NEXT:    vmovdqa (%rsi), %xmm7
+; AVX2-NEXT:    vmovdqa 16(%rsi), %xmm3
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vmovdqa 16(%rdi), %xmm4
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm1[0],zero,xmm1[1],zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[0,0,1,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm3[0],zero,xmm3[1],zero
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm10, %ymm3
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm3
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm10, %ymm2
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm5, %ymm2
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm5, %ymm0
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm3
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
-; AVX2-NEXT:    vmovdqa %ymm2, (%r8)
-; AVX2-NEXT:    vmovdqa %ymm0, 96(%r8)
-; AVX2-NEXT:    vmovdqa %ymm1, 64(%r8)
-; AVX2-NEXT:    vmovdqa %ymm11, 32(%r8)
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm5, %ymm2
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
+; AVX2-NEXT:    vmovdqa %ymm2, 96(%r8)
+; AVX2-NEXT:    vmovdqa %ymm0, (%r8)
+; AVX2-NEXT:    vmovdqa %ymm1, 32(%r8)
+; AVX2-NEXT:    vmovdqa %ymm11, 64(%r8)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -543,207 +550,207 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
 ;
 ; AVX1-LABEL: vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rcx), %xmm12
-; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm15
-; AVX1-NEXT:    vmovdqa 32(%rcx), %xmm3
-; AVX1-NEXT:    vmovdqa 48(%rcx), %xmm11
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm13
-; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm6
-; AVX1-NEXT:    vmovdqa 32(%rdx), %xmm7
-; AVX1-NEXT:    vmovdqa 48(%rdx), %xmm1
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm8
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm14
-; AVX1-NEXT:    vmovdqa 48(%rsi), %xmm2
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm4
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
+; AVX1-NEXT:    vmovdqa (%rcx), %xmm15
+; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm12
+; AVX1-NEXT:    vmovdqa 32(%rcx), %xmm11
+; AVX1-NEXT:    vmovdqa 48(%rcx), %xmm2
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm13
+; AVX1-NEXT:    vmovdqa 32(%rdx), %xmm1
+; AVX1-NEXT:    vmovdqa 48(%rdx), %xmm7
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm8
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm14
+; AVX1-NEXT:    vmovdqa 32(%rsi), %xmm3
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm5
+; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm4
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm9, %ymm0
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[0,0,1,1]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm8, %ymm8
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm0[0],zero,xmm0[1],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm10, %ymm0
-; AVX1-NEXT:    vmovdqa 32(%rsi), %xmm10
+; AVX1-NEXT:    vmovdqa 48(%rsi), %xmm10
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7]
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm0
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm0
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[0,0,1,1]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm8, %ymm1
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,0,1,1]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm7, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm6, %ymm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3],ymm3[4],ymm0[5],ymm3[6],ymm0[7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
-; AVX1-NEXT:    vmovaps %ymm2, (%r8)
-; AVX1-NEXT:    vmovaps %ymm1, 96(%r8)
-; AVX1-NEXT:    vmovaps %ymm0, 64(%r8)
-; AVX1-NEXT:    vmovaps %ymm3, 160(%r8)
-; AVX1-NEXT:    vmovaps %ymm11, 128(%r8)
-; AVX1-NEXT:    vmovaps %ymm8, 224(%r8)
-; AVX1-NEXT:    vmovaps %ymm9, 192(%r8)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
+; AVX1-NEXT:    vmovaps %ymm3, 96(%r8)
+; AVX1-NEXT:    vmovaps %ymm1, (%r8)
 ; AVX1-NEXT:    vmovaps %ymm0, 32(%r8)
+; AVX1-NEXT:    vmovaps %ymm2, 192(%r8)
+; AVX1-NEXT:    vmovaps %ymm11, 224(%r8)
+; AVX1-NEXT:    vmovaps %ymm8, 128(%r8)
+; AVX1-NEXT:    vmovaps %ymm9, 160(%r8)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 64(%r8)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vf32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rcx), %xmm12
-; AVX2-NEXT:    vmovdqa 16(%rcx), %xmm15
-; AVX2-NEXT:    vmovdqa 32(%rcx), %xmm3
-; AVX2-NEXT:    vmovdqa 48(%rcx), %xmm11
-; AVX2-NEXT:    vmovdqa (%rdx), %xmm13
-; AVX2-NEXT:    vmovdqa 16(%rdx), %xmm6
-; AVX2-NEXT:    vmovdqa 32(%rdx), %xmm7
-; AVX2-NEXT:    vmovdqa 48(%rdx), %xmm1
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm5, %ymm8
-; AVX2-NEXT:    vmovdqa (%rsi), %xmm14
-; AVX2-NEXT:    vmovdqa 48(%rsi), %xmm2
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm4
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
+; AVX2-NEXT:    vmovdqa (%rcx), %xmm15
+; AVX2-NEXT:    vmovdqa 16(%rcx), %xmm12
+; AVX2-NEXT:    vmovdqa 32(%rcx), %xmm11
+; AVX2-NEXT:    vmovdqa 48(%rcx), %xmm2
+; AVX2-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX2-NEXT:    vmovdqa 16(%rdx), %xmm13
+; AVX2-NEXT:    vmovdqa 32(%rdx), %xmm1
+; AVX2-NEXT:    vmovdqa 48(%rdx), %xmm7
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm8
+; AVX2-NEXT:    vmovdqa 16(%rsi), %xmm14
+; AVX2-NEXT:    vmovdqa 32(%rsi), %xmm3
+; AVX2-NEXT:    vmovdqa 16(%rdi), %xmm5
+; AVX2-NEXT:    vmovdqa 32(%rdi), %xmm4
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm9, %ymm0
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7]
 ; AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[0,0,1,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm8, %ymm8
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm0[0],zero,xmm0[1],zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm10, %ymm0
-; AVX2-NEXT:    vmovdqa 32(%rsi), %xmm10
+; AVX2-NEXT:    vmovdqa 48(%rsi), %xmm10
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7]
-; AVX2-NEXT:    vmovdqa 32(%rdi), %xmm0
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
+; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm0
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[0,0,1,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm8, %ymm1
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX2-NEXT:    vmovdqa 16(%rsi), %xmm4
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm11 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
-; AVX2-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; AVX2-NEXT:    vmovdqa (%rsi), %xmm4
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm3, %ymm0
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,0,1,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm7, %ymm2
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[0,0,1,1]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm6, %ymm2
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm3, %ymm0
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm3[0],zero,xmm3[1],zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm7, %ymm3
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3],ymm3[4],ymm0[5],ymm3[6],ymm0[7]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[0,0,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm6, %ymm3
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm4, %ymm1
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,0,1,1]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,0,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
-; AVX2-NEXT:    vmovdqa %ymm2, (%r8)
-; AVX2-NEXT:    vmovdqa %ymm1, 96(%r8)
-; AVX2-NEXT:    vmovdqa %ymm0, 64(%r8)
-; AVX2-NEXT:    vmovdqa %ymm3, 160(%r8)
-; AVX2-NEXT:    vmovdqa %ymm11, 128(%r8)
-; AVX2-NEXT:    vmovdqa %ymm8, 224(%r8)
-; AVX2-NEXT:    vmovdqa %ymm9, 192(%r8)
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
+; AVX2-NEXT:    vmovdqa %ymm3, 96(%r8)
+; AVX2-NEXT:    vmovdqa %ymm1, (%r8)
+; AVX2-NEXT:    vmovdqa %ymm0, 32(%r8)
+; AVX2-NEXT:    vmovdqa %ymm2, 192(%r8)
+; AVX2-NEXT:    vmovdqa %ymm11, 224(%r8)
+; AVX2-NEXT:    vmovdqa %ymm8, 128(%r8)
+; AVX2-NEXT:    vmovdqa %ymm9, 160(%r8)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT:    vmovaps %ymm0, 32(%r8)
+; AVX2-NEXT:    vmovaps %ymm0, 64(%r8)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index 43acd85d68b0a..f671c4d1d2175 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -2,8 +2,8 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx  | FileCheck %s --check-prefixes=AVX1
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2-SLOW
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FAST
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FAST
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FAST,AVX2-FAST-ALL
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FAST,AVX2-FAST-PERLANE
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
@@ -127,50 +127,46 @@ define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.
 ; SSE-LABEL: vf4:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
-; SSE-NEXT:    movq {{.*#+}} xmm5 = mem[0],zero
-; SSE-NEXT:    movq {{.*#+}} xmm3 = mem[0],zero
-; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[3,3,3,3]
-; SSE-NEXT:    psrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT:    movdqa {{.*#+}} xmm7 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[2,1,1,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,6,7]
-; SSE-NEXT:    pand %xmm7, %xmm4
-; SSE-NEXT:    pandn %xmm5, %xmm7
-; SSE-NEXT:    por %xmm4, %xmm7
-; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT:    pand %xmm4, %xmm7
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1]
-; SSE-NEXT:    pandn %xmm5, %xmm4
+; SSE-NEXT:    movq {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm8 = mem[0],zero
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm4[1,1,1,1]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,0,2,1]
+; SSE-NEXT:    movdqa {{.*#+}} xmm6 = [65535,65535,0,0,65535,65535,65535,0]
+; SSE-NEXT:    movdqa %xmm6, %xmm7
+; SSE-NEXT:    pandn %xmm4, %xmm7
+; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[0,1,3,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,1,1,1]
+; SSE-NEXT:    pand %xmm6, %xmm4
 ; SSE-NEXT:    por %xmm7, %xmm4
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,0,2,1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm7 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,1,1]
-; SSE-NEXT:    pand %xmm7, %xmm2
-; SSE-NEXT:    pandn %xmm3, %xmm7
-; SSE-NEXT:    por %xmm2, %xmm7
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,65535,65535]
-; SSE-NEXT:    pand %xmm2, %xmm7
-; SSE-NEXT:    pandn %xmm5, %xmm2
-; SSE-NEXT:    por %xmm7, %xmm2
-; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT:    psrlq $48, %xmm1
-; SSE-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
-; SSE-NEXT:    pand %xmm3, %xmm6
-; SSE-NEXT:    pandn %xmm0, %xmm3
-; SSE-NEXT:    por %xmm6, %xmm3
-; SSE-NEXT:    movq %xmm3, 32(%r9)
-; SSE-NEXT:    movdqa %xmm2, (%r9)
-; SSE-NEXT:    movdqa %xmm4, 16(%r9)
+; SSE-NEXT:    movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT:    pand %xmm7, %xmm4
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm8[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm7, %xmm5
+; SSE-NEXT:    pandn %xmm3, %xmm5
+; SSE-NEXT:    por %xmm4, %xmm5
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,7]
+; SSE-NEXT:    pand %xmm6, %xmm3
+; SSE-NEXT:    pandn %xmm2, %xmm6
+; SSE-NEXT:    por %xmm3, %xmm6
+; SSE-NEXT:    pand %xmm7, %xmm6
+; SSE-NEXT:    pandn %xmm9, %xmm7
+; SSE-NEXT:    por %xmm6, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT:    movq %xmm0, 32(%r9)
+; SSE-NEXT:    movdqa %xmm7, 16(%r9)
+; SSE-NEXT:    movdqa %xmm5, (%r9)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf4:
@@ -294,194 +290,287 @@ define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.
 define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.vecptr2, <8 x i16>* %in.vecptr3, <8 x i16>* %in.vecptr4, <40 x i16>* %out.vec) nounwind {
 ; SSE-LABEL: vf8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm13
-; SSE-NEXT:    movdqa (%rsi), %xmm11
-; SSE-NEXT:    movdqa (%rdx), %xmm10
-; SSE-NEXT:    movdqa (%rcx), %xmm12
+; SSE-NEXT:    movdqa (%rdi), %xmm11
+; SSE-NEXT:    movdqa (%rsi), %xmm13
+; SSE-NEXT:    movdqa (%rdx), %xmm4
+; SSE-NEXT:    movdqa (%rcx), %xmm2
 ; SSE-NEXT:    movdqa (%r8), %xmm8
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,0,65535,65535,65535]
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    pandn %xmm13, %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm11[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pand %xmm0, %xmm4
-; SSE-NEXT:    por %xmm1, %xmm4
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,0,0,65535,65535]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm10[1,1,2,2]
-; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,65535,65535,0]
-; SSE-NEXT:    pand %xmm3, %xmm5
-; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm12[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm3, %xmm1
-; SSE-NEXT:    pandn %xmm7, %xmm1
-; SSE-NEXT:    por %xmm5, %xmm1
-; SSE-NEXT:    pand %xmm2, %xmm1
-; SSE-NEXT:    pandn %xmm4, %xmm2
-; SSE-NEXT:    por %xmm1, %xmm2
 ; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT:    pand %xmm5, %xmm2
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm2[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm5, %xmm3
 ; SSE-NEXT:    pandn %xmm8, %xmm5
-; SSE-NEXT:    por %xmm2, %xmm5
-; SSE-NEXT:    movdqa %xmm10, %xmm1
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    por %xmm3, %xmm5
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,0,0,65535,65535,65535,0]
+; SSE-NEXT:    movdqa %xmm3, %xmm6
+; SSE-NEXT:    pandn %xmm5, %xmm6
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[1,1,2,2]
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm13[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm0, %xmm7
+; SSE-NEXT:    pandn %xmm5, %xmm0
+; SSE-NEXT:    por %xmm7, %xmm0
+; SSE-NEXT:    pand %xmm3, %xmm0
+; SSE-NEXT:    por %xmm6, %xmm0
+; SSE-NEXT:    movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT:    pand %xmm7, %xmm0
+; SSE-NEXT:    movdqa %xmm7, %xmm14
+; SSE-NEXT:    pandn %xmm11, %xmm14
+; SSE-NEXT:    por %xmm0, %xmm14
+; SSE-NEXT:    movdqa %xmm13, %xmm0
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm3, %xmm9
+; SSE-NEXT:    pandn %xmm0, %xmm9
+; SSE-NEXT:    movdqa %xmm2, %xmm10
+; SSE-NEXT:    movdqa %xmm4, %xmm5
+; SSE-NEXT:    pshufd {{.*#+}} xmm12 = xmm4[1,1,1,1]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm8[0,1,0,1]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm8[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,5,7]
+; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    por %xmm9, %xmm1
+; SSE-NEXT:    psrld $16, %xmm10
+; SSE-NEXT:    movdqa %xmm7, %xmm6
+; SSE-NEXT:    pandn %xmm10, %xmm6
+; SSE-NEXT:    pand %xmm7, %xmm1
+; SSE-NEXT:    por %xmm1, %xmm6
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm4[0,1,2,2,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT:    movdqa %xmm13, %xmm4
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm4[0,1,3,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[0,1,1,1]
-; SSE-NEXT:    pand %xmm2, %xmm7
-; SSE-NEXT:    pandn %xmm1, %xmm2
-; SSE-NEXT:    por %xmm7, %xmm2
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm8[0,1,0,1]
-; SSE-NEXT:    pandn %xmm9, %xmm0
-; SSE-NEXT:    por %xmm2, %xmm0
-; SSE-NEXT:    movdqa %xmm13, %xmm1
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
-; SSE-NEXT:    psrlq $48, %xmm11
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm11[1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT:    movdqa %xmm2, %xmm7
-; SSE-NEXT:    pandn %xmm1, %xmm7
-; SSE-NEXT:    movdqa %xmm10, %xmm1
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,4,5,7,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,1,3,3]
-; SSE-NEXT:    pand %xmm2, %xmm6
-; SSE-NEXT:    por %xmm7, %xmm6
-; SSE-NEXT:    pand %xmm3, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm8[2,3,2,3]
-; SSE-NEXT:    pandn %xmm7, %xmm3
-; SSE-NEXT:    por %xmm6, %xmm3
-; SSE-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm13[0,2,3,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,2,2]
-; SSE-NEXT:    pand %xmm2, %xmm6
-; SSE-NEXT:    pandn %xmm1, %xmm2
-; SSE-NEXT:    por %xmm6, %xmm2
-; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,0,65535,65535]
-; SSE-NEXT:    pand %xmm1, %xmm2
-; SSE-NEXT:    pandn %xmm7, %xmm1
-; SSE-NEXT:    por %xmm2, %xmm1
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,4,5,6,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm12[2,2,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT:    pand %xmm2, %xmm4
-; SSE-NEXT:    pandn %xmm9, %xmm2
-; SSE-NEXT:    por %xmm4, %xmm2
-; SSE-NEXT:    movdqa %xmm2, 16(%r9)
-; SSE-NEXT:    movdqa %xmm1, 48(%r9)
-; SSE-NEXT:    movdqa %xmm3, 64(%r9)
-; SSE-NEXT:    movdqa %xmm0, (%r9)
-; SSE-NEXT:    movdqa %xmm5, 32(%r9)
+; SSE-NEXT:    movdqa %xmm3, %xmm4
+; SSE-NEXT:    pandn %xmm1, %xmm4
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm11[0,1,3,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,1,1]
+; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    por %xmm4, %xmm1
+; SSE-NEXT:    pand %xmm7, %xmm1
+; SSE-NEXT:    movdqa %xmm7, %xmm4
+; SSE-NEXT:    pandn %xmm0, %xmm4
+; SSE-NEXT:    por %xmm1, %xmm4
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,7,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm3, %xmm1
+; SSE-NEXT:    pandn %xmm0, %xmm1
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,4,5,7,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT:    pand %xmm3, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    pand %xmm7, %xmm0
+; SSE-NEXT:    psrlq $48, %xmm13
+; SSE-NEXT:    movdqa %xmm7, %xmm1
+; SSE-NEXT:    pandn %xmm13, %xmm1
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,5,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,7]
+; SSE-NEXT:    pand %xmm3, %xmm2
+; SSE-NEXT:    pandn %xmm0, %xmm3
+; SSE-NEXT:    por %xmm2, %xmm3
+; SSE-NEXT:    pand %xmm7, %xmm3
+; SSE-NEXT:    pandn %xmm12, %xmm7
+; SSE-NEXT:    por %xmm3, %xmm7
+; SSE-NEXT:    movdqa %xmm7, 16(%r9)
+; SSE-NEXT:    movdqa %xmm1, 64(%r9)
+; SSE-NEXT:    movdqa %xmm4, (%r9)
+; SSE-NEXT:    movdqa %xmm6, 48(%r9)
+; SSE-NEXT:    movdqa %xmm14, 32(%r9)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm3
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm4
-; AVX1-NEXT:    vmovdqa (%rcx), %xmm5
-; AVX1-NEXT:    vmovdqa (%r8), %xmm1
-; AVX1-NEXT:    vpsrlq $48, %xmm3, %xmm2
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm2 = xmm0[1],xmm2[1]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm9[0,1,2,3,4,5,7,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm7[0,1,2],xmm2[3,4],xmm7[5,6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX1-NEXT:    vmovdqa (%rcx), %xmm3
+; AVX1-NEXT:    vmovdqa (%r8), %xmm4
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm4[3],xmm5[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[1,1,2,2]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm0[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2,3,4,5],xmm6[6],xmm7[7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5,6],xmm5[7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm5[0,1,2,3],xmm2[4],xmm5[5,6,7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,3,2,4,5,6,7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[2,3,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm9 = xmm6[0],xmm7[1],xmm6[2,3,4,5],xmm7[6],xmm6[7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,2,2,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,0,2,1]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm2[0,1,3,2,4,5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[0,1,3,2,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,1,1]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5,6],xmm7[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[0,1,0,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[0,1,0,1]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm10 = xmm6[0,1,2,3],xmm7[4],xmm6[5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[2,2,2,2,4,5,6,7]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1],xmm2[2,3],xmm6[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1],xmm2[2,3,4,5],xmm7[6],xmm2[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm3[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm0[4],xmm6[5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4,5,6],xmm5[7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1,2,3],xmm6[4,5],xmm4[6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm1[3],xmm4[4,5,6,7]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,xmm9[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[3,4],xmm0[5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5],xmm0[6,7]
-; AVX1-NEXT:    vmovdqa %xmm0, 48(%r9)
-; AVX1-NEXT:    vmovdqa %xmm4, 32(%r9)
-; AVX1-NEXT:    vmovdqa %xmm2, 16(%r9)
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm7[1],xmm5[2,3,4,5],xmm7[6],xmm5[7]
+; AVX1-NEXT:    vmovdqa %xmm5, 16(%r9)
 ; AVX1-NEXT:    vmovdqa %xmm10, (%r9)
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0,1],xmm1[2],xmm8[3,4,5,6],xmm1[7]
+; AVX1-NEXT:    vmovdqa %xmm9, 48(%r9)
+; AVX1-NEXT:    vmovdqa %xmm8, 32(%r9)
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6],xmm2[7]
+; AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
 ; AVX1-NEXT:    vmovdqa %xmm0, 64(%r9)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: vf8:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm3
+; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm4
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm5
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm6
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm5[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,2,3,2,4,6,7,6]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm5[2,3,0,1]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[6,7,10,11,u,u,6,7,u,u,8,9,12,13,u,u,22,23,26,27,u,u,22,23,u,u,24,25,28,29,u,u]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3],ymm7[4],ymm8[5,6],ymm7[7],ymm8[8,9],ymm7[10],ymm8[11],ymm7[12],ymm8[13,14],ymm7[15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,6,7,10,11,u,u,u,u,u,u,8,9,u,u,u,u,22,23,26,27,u,u,u,u,u,u,24,25]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm6[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,5,5,4,7,8,9,10,11,13,13,12,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6],ymm8[7],ymm9[8,9],ymm8[10,11],ymm9[12,13,14],ymm8[15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm7, %ymm8, %ymm7
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,1,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm8, %ymm7, %ymm4, %ymm4
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,2,2,0]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm6[0,1,8,9,12,13],zero,zero,zero,zero,ymm6[2,3,18,19,18,19],zero,zero,zero,zero,ymm6[28,29,20,21,28,29],zero,zero
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,2,0,2]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[0,1,8,9],zero,zero,zero,zero,zero,zero,ymm5[2,3,10,11],zero,zero,zero,zero,zero,zero,ymm5[20,21,28,29],zero,zero,zero,zero,zero,zero,ymm5[22,23]
-; AVX2-SLOW-NEXT:    vpor %ymm6, %ymm5, %ymm5
-; AVX2-SLOW-NEXT:    vpbroadcastq (%r8), %ymm6
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm5
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm5[6,7,u,u,u,u,10,11,u,u,8,9,u,u,u,u,22,23,u,u,u,u,26,27,u,u,24,25,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,6,7,10,11,u,u,8,9,u,u,8,9,12,13,u,u,22,23,26,27,u,u,24,25,u,u,24,25,28,29]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4],ymm6[5],ymm5[6,7],ymm6[8],ymm5[9,10],ymm6[11],ymm5[12],ymm6[13],ymm5[14,15]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm2, %ymm6
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm6[8,9,10,11,6,7,u,u,u,u,10,11,12,13,8,9,24,25,26,27,22,23,u,u,u,u,26,27,28,29,24,25]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4],ymm7[5,6,7,8,9,10],ymm6[11,12],ymm7[13,14,15]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm5, %ymm6, %ymm5
+; AVX2-SLOW-NEXT:    vpbroadcastq 8(%rdi), %ymm6
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
 ; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm5, %ymm6, %ymm5
-; AVX2-SLOW-NEXT:    vpsrlq $48, %xmm2, %xmm2
-; AVX2-SLOW-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm6
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,2,2,0]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm6[0,1,8,9,12,13],zero,zero,zero,zero,ymm6[2,3,18,19,18,19],zero,zero,zero,zero,ymm6[28,29,20,21,28,29],zero,zero
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm3, %ymm8
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,0,2]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[0,1,8,9],zero,zero,zero,zero,zero,zero,ymm8[2,3,10,11],zero,zero,zero,zero,zero,zero,ymm8[20,21,28,29],zero,zero,zero,zero,zero,zero,ymm8[22,23]
+; AVX2-SLOW-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX2-SLOW-NEXT:    vpbroadcastq (%r8), %ymm8
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm6, %ymm8, %ymm6
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,6]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,6]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 12(%r8), %xmm1
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6],xmm1[7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5,6],xmm3[7]
+; AVX2-SLOW-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm0, 64(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm5, (%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 32(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm6, (%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm5, 32(%r9)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
+; AVX2-FAST-ALL-LABEL: vf8:
+; AVX2-FAST-ALL:       # %bb.0:
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %xmm3
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %xmm4
+; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm5
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,2,2,0]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,ymm5[0,1,8,9,12,13],zero,zero,zero,zero,ymm5[2,3,18,19,18,19],zero,zero,zero,zero,ymm5[28,29,20,21,28,29],zero,zero
+; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm6
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[0,1,8,9],zero,zero,zero,zero,zero,zero,ymm6[2,3,10,11],zero,zero,zero,zero,zero,zero,ymm6[20,21,28,29],zero,zero,zero,zero,zero,zero,ymm6[22,23]
+; AVX2-FAST-ALL-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq (%r8), %ymm6
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm6, %ymm5
+; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm6
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm8 = <1,5,2,u,6,2,u,u>
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm6, %ymm8, %ymm6
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm6[2,3,6,7,6,7],zero,zero,zero,zero,ymm6[8,9,16,17,18,19],zero,zero,zero,zero,ymm6[22,23,18,19,18,19],zero,zero
+; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm8
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm9 = <1,5,2,6,2,6,3,u>
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm8, %ymm9, %ymm8
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[2,3,6,7],zero,zero,zero,zero,zero,zero,ymm8[8,9,12,13],zero,zero,zero,zero,zero,zero,ymm8[18,19,22,23],zero,zero,zero,zero,zero,zero,ymm8[24,25]
+; AVX2-FAST-ALL-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq 8(%rdi), %ymm8
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm6, %ymm8, %ymm6
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,8,9,14,15,u,u,u,u,u,u,12,13]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,9,10,11,u,u,u,u,u,u,12,13,14,15,u,u]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5,6],xmm1[7]
+; AVX2-FAST-ALL-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm0, 64(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm6, 32(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm5, (%r9)
+; AVX2-FAST-ALL-NEXT:    vzeroupper
+; AVX2-FAST-ALL-NEXT:    retq
+;
+; AVX2-FAST-PERLANE-LABEL: vf8:
+; AVX2-FAST-PERLANE:       # %bb.0:
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = ymm5[6,7,u,u,u,u,10,11,u,u,8,9,u,u,u,u,22,23,u,u,u,u,26,27,u,u,24,25,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,6,7,10,11,u,u,8,9,u,u,8,9,12,13,u,u,22,23,26,27,u,u,24,25,u,u,24,25,28,29]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4],ymm6[5],ymm5[6,7],ymm6[8],ymm5[9,10],ymm6[11],ymm5[12],ymm6[13],ymm5[14,15]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm7 = ymm6[8,9,10,11,6,7,u,u,u,u,10,11,12,13,8,9,24,25,26,27,22,23,u,u,u,u,26,27,28,29,24,25]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4],ymm7[5,6,7,8,9,10],ymm6[11,12],ymm7[13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm7 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm5, %ymm6, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq 8(%rdi), %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm5, %ymm6, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,2,2,0]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm6[0,1,8,9,12,13],zero,zero,zero,zero,ymm6[2,3,18,19,18,19],zero,zero,zero,zero,ymm6[28,29,20,21,28,29],zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm2, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,0,2]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[0,1,8,9],zero,zero,zero,zero,zero,zero,ymm8[2,3,10,11],zero,zero,zero,zero,zero,zero,ymm8[20,21,28,29],zero,zero,zero,zero,zero,zero,ymm8[22,23]
+; AVX2-FAST-PERLANE-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq (%r8), %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm6, %ymm8, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,8,9,14,15,u,u,u,u,u,u,12,13]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[8,9,10,11,u,u,u,u,u,u,12,13,14,15,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6],xmm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm0, 64(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm6, (%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm5, 32(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vzeroupper
+; AVX2-FAST-PERLANE-NEXT:    retq
+;
 ; AVX512-LABEL: vf8:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
@@ -519,515 +608,604 @@ define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.
 define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>* %in.vecptr2, <16 x i16>* %in.vecptr3, <16 x i16>* %in.vecptr4, <80 x i16>* %out.vec) nounwind {
 ; SSE-LABEL: vf16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pushq %rax
-; SSE-NEXT:    movdqa (%rdi), %xmm3
-; SSE-NEXT:    movdqa 16(%rdi), %xmm5
-; SSE-NEXT:    movdqa (%rsi), %xmm6
+; SSE-NEXT:    movdqa 16(%rdi), %xmm10
+; SSE-NEXT:    movaps (%rsi), %xmm0
+; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 16(%rsi), %xmm1
+; SSE-NEXT:    movdqa (%rdx), %xmm14
+; SSE-NEXT:    movdqa 16(%rdx), %xmm4
+; SSE-NEXT:    movdqa (%rcx), %xmm15
+; SSE-NEXT:    movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 16(%rcx), %xmm11
+; SSE-NEXT:    movdqa (%r8), %xmm9
+; SSE-NEXT:    movdqa 16(%r8), %xmm13
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE-NEXT:    movdqa %xmm3, %xmm0
+; SSE-NEXT:    pandn %xmm13, %xmm0
+; SSE-NEXT:    movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm11[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm3, %xmm5
+; SSE-NEXT:    por %xmm0, %xmm5
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,65535,0,0,65535,65535,65535,0]
+; SSE-NEXT:    movdqa %xmm2, %xmm12
+; SSE-NEXT:    pandn %xmm5, %xmm12
+; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm4[1,1,2,2]
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT:    movdqa %xmm0, %xmm5
+; SSE-NEXT:    pandn %xmm8, %xmm5
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm1[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm0, %xmm7
+; SSE-NEXT:    por %xmm5, %xmm7
+; SSE-NEXT:    pand %xmm2, %xmm7
+; SSE-NEXT:    por %xmm12, %xmm7
+; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT:    pand %xmm5, %xmm7
+; SSE-NEXT:    movdqa %xmm5, %xmm6
+; SSE-NEXT:    pandn %xmm10, %xmm6
+; SSE-NEXT:    por %xmm7, %xmm6
 ; SSE-NEXT:    movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 16(%rsi), %xmm8
-; SSE-NEXT:    movdqa 16(%rdx), %xmm10
-; SSE-NEXT:    movdqa (%rcx), %xmm12
-; SSE-NEXT:    movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 16(%rcx), %xmm13
-; SSE-NEXT:    movdqa 16(%r8), %xmm15
-; SSE-NEXT:    movdqa {{.*#+}} xmm11 = [65535,65535,65535,65535,0,65535,65535,65535]
-; SSE-NEXT:    movdqa %xmm11, %xmm1
-; SSE-NEXT:    pandn %xmm5, %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm8[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pand %xmm11, %xmm2
-; SSE-NEXT:    por %xmm1, %xmm2
-; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,0,0,65535,65535]
-; SSE-NEXT:    movdqa %xmm1, %xmm7
-; SSE-NEXT:    pandn %xmm2, %xmm7
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,2,2]
-; SSE-NEXT:    movdqa {{.*#+}} xmm14 = [65535,65535,0,65535,65535,65535,65535,0]
-; SSE-NEXT:    pand %xmm14, %xmm0
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm13[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm9 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm14, %xmm4
-; SSE-NEXT:    pandn %xmm9, %xmm4
-; SSE-NEXT:    por %xmm0, %xmm4
-; SSE-NEXT:    pand %xmm1, %xmm4
-; SSE-NEXT:    por %xmm7, %xmm4
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT:    pand %xmm2, %xmm4
-; SSE-NEXT:    movdqa %xmm2, %xmm0
-; SSE-NEXT:    pandn %xmm15, %xmm0
-; SSE-NEXT:    por %xmm4, %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm11, %xmm0
+; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm15[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm3, %xmm7
+; SSE-NEXT:    pandn %xmm9, %xmm3
+; SSE-NEXT:    movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    por %xmm7, %xmm3
+; SSE-NEXT:    movdqa %xmm2, %xmm7
+; SSE-NEXT:    pandn %xmm3, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm14[1,1,2,2]
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm8[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm0, %xmm6
 ; SSE-NEXT:    pandn %xmm3, %xmm0
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm6[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pand %xmm11, %xmm4
-; SSE-NEXT:    por %xmm0, %xmm4
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm12[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    movdqa (%rdi), %xmm12
+; SSE-NEXT:    por %xmm6, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm7, %xmm0
+; SSE-NEXT:    pand %xmm5, %xmm0
+; SSE-NEXT:    movdqa %xmm5, %xmm3
+; SSE-NEXT:    pandn %xmm12, %xmm3
+; SSE-NEXT:    por %xmm0, %xmm3
+; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm2, %xmm6
+; SSE-NEXT:    pandn %xmm0, %xmm6
+; SSE-NEXT:    movdqa %xmm13, %xmm3
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm10[4],xmm3[5],xmm10[5],xmm3[6],xmm10[6],xmm3[7],xmm10[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm6, %xmm0
+; SSE-NEXT:    movdqa %xmm11, %xmm6
+; SSE-NEXT:    psrld $16, %xmm6
+; SSE-NEXT:    movdqa %xmm5, %xmm15
+; SSE-NEXT:    pandn %xmm6, %xmm15
+; SSE-NEXT:    pand %xmm5, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm15
+; SSE-NEXT:    movdqa %xmm8, %xmm0
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
 ; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm14, %xmm7
+; SSE-NEXT:    movdqa %xmm2, %xmm6
+; SSE-NEXT:    pandn %xmm0, %xmm6
+; SSE-NEXT:    movdqa %xmm9, %xmm8
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm8[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm7
+; SSE-NEXT:    por %xmm6, %xmm7
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT:    movdqa %xmm13, %xmm0
+; SSE-NEXT:    psrld $16, %xmm0
+; SSE-NEXT:    movdqa %xmm5, %xmm9
+; SSE-NEXT:    pandn %xmm0, %xmm9
+; SSE-NEXT:    pand %xmm5, %xmm7
+; SSE-NEXT:    por %xmm7, %xmm9
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,7,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    pandn %xmm0, %xmm3
+; SSE-NEXT:    movdqa %xmm4, %xmm0
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm3, %xmm0
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1],xmm10[2],xmm1[2],xmm10[3],xmm1[3]
+; SSE-NEXT:    psrlq $48, %xmm1
+; SSE-NEXT:    movdqa %xmm5, %xmm3
+; SSE-NEXT:    pandn %xmm1, %xmm3
+; SSE-NEXT:    pand %xmm5, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm3
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,4,5,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm1
+; SSE-NEXT:    pandn %xmm0, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm11[1,1,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm7
+; SSE-NEXT:    por %xmm1, %xmm7
+; SSE-NEXT:    movdqa %xmm5, %xmm11
+; SSE-NEXT:    pandn %xmm0, %xmm11
+; SSE-NEXT:    pand %xmm5, %xmm7
+; SSE-NEXT:    por %xmm7, %xmm11
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm4[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    pandn %xmm0, %xmm4
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm10[0,1,3,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm4, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm5, %xmm10
+; SSE-NEXT:    pandn %xmm7, %xmm10
+; SSE-NEXT:    pand %xmm5, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm10
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,7,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm7
 ; SSE-NEXT:    pandn %xmm0, %xmm7
-; SSE-NEXT:    movdqa (%rdx), %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
-; SSE-NEXT:    pand %xmm14, %xmm0
-; SSE-NEXT:    por %xmm0, %xmm7
-; SSE-NEXT:    pand %xmm1, %xmm7
-; SSE-NEXT:    pandn %xmm4, %xmm1
-; SSE-NEXT:    movdqa (%r8), %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    por %xmm7, %xmm1
+; SSE-NEXT:    movdqa %xmm14, %xmm0
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
 ; SSE-NEXT:    pand %xmm2, %xmm1
-; SSE-NEXT:    pandn %xmm0, %xmm2
-; SSE-NEXT:    por %xmm1, %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm5, %xmm1
-; SSE-NEXT:    movdqa %xmm5, %xmm12
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3]
-; SSE-NEXT:    psrlq $48, %xmm8
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm8[1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT:    movdqa %xmm0, %xmm7
-; SSE-NEXT:    pandn %xmm1, %xmm7
-; SSE-NEXT:    movdqa %xmm10, %xmm1
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,4,5,7,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,1,3,3]
-; SSE-NEXT:    pand %xmm0, %xmm6
-; SSE-NEXT:    por %xmm7, %xmm6
-; SSE-NEXT:    pand %xmm14, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm15[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm14, %xmm2
-; SSE-NEXT:    pandn %xmm4, %xmm2
-; SSE-NEXT:    por %xmm6, %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT:    movdqa %xmm0, %xmm7
-; SSE-NEXT:    pandn %xmm1, %xmm7
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm12[0,2,3,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm6
-; SSE-NEXT:    por %xmm7, %xmm6
-; SSE-NEXT:    movdqa {{.*#+}} xmm9 = [0,65535,65535,65535,65535,0,65535,65535]
-; SSE-NEXT:    movdqa %xmm9, %xmm1
-; SSE-NEXT:    pandn %xmm4, %xmm1
-; SSE-NEXT:    pand %xmm9, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm1
-; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm5[0,1,2,3,4,5,6,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm13, %xmm6
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm10[0],xmm6[1],xmm10[1],xmm6[2],xmm10[2],xmm6[3],xmm10[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[2,2,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm12 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT:    pand %xmm12, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm15[0,1,0,1]
-; SSE-NEXT:    movdqa %xmm12, %xmm15
-; SSE-NEXT:    pandn %xmm1, %xmm15
-; SSE-NEXT:    por %xmm6, %xmm15
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm10[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm10 = xmm6[0,0,2,1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm13 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT:    movdqa %xmm13, %xmm6
-; SSE-NEXT:    pandn %xmm10, %xmm6
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[0,1,3,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,1,1,1]
-; SSE-NEXT:    pand %xmm13, %xmm5
-; SSE-NEXT:    por %xmm6, %xmm5
-; SSE-NEXT:    movdqa %xmm11, %xmm10
-; SSE-NEXT:    pandn %xmm1, %xmm10
-; SSE-NEXT:    pand %xmm11, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm10
-; SSE-NEXT:    movdqa %xmm3, %xmm8
-; SSE-NEXT:    movdqa %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE-NEXT:    movdqa %xmm3, %xmm5
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE-NEXT:    psrlq $48, %xmm4
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm4[1]
+; SSE-NEXT:    por %xmm7, %xmm1
+; SSE-NEXT:    pand %xmm5, %xmm1
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; SSE-NEXT:    psrlq $48, %xmm0
 ; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    pandn %xmm5, %xmm4
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm6, %xmm5
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm5[0,1,2,3,4,5,7,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
-; SSE-NEXT:    pand %xmm0, %xmm3
-; SSE-NEXT:    por %xmm4, %xmm3
-; SSE-NEXT:    pand %xmm14, %xmm3
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[2,3,2,3]
-; SSE-NEXT:    pandn %xmm4, %xmm14
-; SSE-NEXT:    por %xmm3, %xmm14
-; SSE-NEXT:    pslldq {{.*#+}} xmm5 = zero,zero,xmm5[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,3,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm1
-; SSE-NEXT:    pandn %xmm5, %xmm0
+; SSE-NEXT:    movdqa %xmm5, %xmm0
+; SSE-NEXT:    pandn %xmm4, %xmm0
 ; SSE-NEXT:    por %xmm1, %xmm0
-; SSE-NEXT:    pand %xmm9, %xmm0
-; SSE-NEXT:    pandn %xmm4, %xmm9
-; SSE-NEXT:    por %xmm0, %xmm9
-; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,5,6,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm2, %xmm1
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[2,2,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm12[0,1,2,3,4,5,6,6]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-NEXT:    pand %xmm12, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm7[0,1,0,1]
-; SSE-NEXT:    pandn %xmm0, %xmm12
-; SSE-NEXT:    por %xmm1, %xmm12
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm6[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm2, %xmm7
+; SSE-NEXT:    pandn %xmm1, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm14[1,1,1,1]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm13[1,1,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm6
+; SSE-NEXT:    por %xmm7, %xmm6
+; SSE-NEXT:    movdqa %xmm5, %xmm7
+; SSE-NEXT:    pandn %xmm1, %xmm7
+; SSE-NEXT:    pand %xmm5, %xmm6
+; SSE-NEXT:    por %xmm6, %xmm7
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm14[0,1,2,2,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm8[0,1,3,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,1,1,1]
-; SSE-NEXT:    pand %xmm13, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm13
-; SSE-NEXT:    por %xmm3, %xmm13
-; SSE-NEXT:    pand %xmm11, %xmm13
-; SSE-NEXT:    pandn %xmm0, %xmm11
-; SSE-NEXT:    por %xmm13, %xmm11
-; SSE-NEXT:    movdqa %xmm11, (%r9)
-; SSE-NEXT:    movdqa %xmm12, 16(%r9)
-; SSE-NEXT:    movdqa %xmm9, 48(%r9)
-; SSE-NEXT:    movdqa %xmm14, 64(%r9)
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm12[0,1,3,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,1,1]
+; SSE-NEXT:    pand %xmm2, %xmm6
+; SSE-NEXT:    pandn %xmm1, %xmm2
+; SSE-NEXT:    por %xmm6, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,1,0,1]
+; SSE-NEXT:    pand %xmm5, %xmm2
+; SSE-NEXT:    pandn %xmm1, %xmm5
+; SSE-NEXT:    por %xmm2, %xmm5
+; SSE-NEXT:    movdqa %xmm5, (%r9)
+; SSE-NEXT:    movdqa %xmm7, 16(%r9)
+; SSE-NEXT:    movdqa %xmm0, 64(%r9)
 ; SSE-NEXT:    movdqa %xmm10, 80(%r9)
-; SSE-NEXT:    movdqa %xmm15, 96(%r9)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 128(%r9)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 144(%r9)
+; SSE-NEXT:    movdqa %xmm11, 96(%r9)
+; SSE-NEXT:    movdqa %xmm3, 144(%r9)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 32(%r9)
+; SSE-NEXT:    movdqa %xmm9, 48(%r9)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 112(%r9)
-; SSE-NEXT:    popq %rax
+; SSE-NEXT:    movdqa %xmm15, 128(%r9)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rcx), %xmm13
-; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm7
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm9
-; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm10 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535]
-; AVX1-NEXT:    vandnps %ymm0, %ymm10, %ymm6
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm14
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm15
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT:    vpsrlq $48, %xmm1, %xmm2
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm2 = xmm0[1],xmm2[1]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,2,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vandps %ymm2, %ymm10, %ymm2
-; AVX1-NEXT:    vorps %ymm6, %ymm2, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm8
+; AVX1-NEXT:    vmovdqa (%r8), %xmm12
 ; AVX1-NEXT:    vmovdqa 16(%r8), %xmm11
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm11[2,3,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1],xmm4[2],xmm8[3,4,5,6],xmm4[7]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm4[0],xmm2[1,2,3,4],xmm4[5],xmm2[6,7]
-; AVX1-NEXT:    vpsrlq $48, %xmm15, %xmm2
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm2 = xmm14[1],xmm2[1]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[0,1,3,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm12[0,1,2,3,4,5,7,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,1,3,3]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,0,2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vandnps %ymm2, %ymm10, %ymm2
-; AVX1-NEXT:    vandps %ymm5, %ymm10, %ymm5
-; AVX1-NEXT:    vorps %ymm2, %ymm5, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,5,6,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm7[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3,4,5,6],xmm4[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm11[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm10 = xmm5[0,1,2,3],xmm4[4],xmm5[5,6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm5 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX1-NEXT:    vandnps %ymm0, %ymm5, %ymm0
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,2,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; AVX1-NEXT:    vandps %ymm5, %ymm1, %ymm1
-; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm11 = xmm1[0,1,2],xmm11[3],xmm1[4,5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0],xmm4[1],xmm0[2,3,4,5],xmm4[6],xmm0[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,6,7]
+; AVX1-NEXT:    vmovdqa (%rcx), %xmm7
+; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm14
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm7[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm1[0,1,3,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,1,1]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,6]
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm9 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX1-NEXT:    vandnps %ymm0, %ymm9, %ymm0
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm4
+; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[1,1,2,2]
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm15
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm6
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vandps %ymm2, %ymm9, %ymm2
+; AVX1-NEXT:    vorps %ymm0, %ymm2, %ymm13
+; AVX1-NEXT:    vextractf128 $1, %ymm13, %xmm2
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm10
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7]
+; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm10[4],xmm2[5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT:    vandnps %ymm2, %ymm9, %ymm2
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm14[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm11[3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm14[0],xmm11[0],xmm14[1],xmm11[1],xmm14[2],xmm11[2],xmm14[3],xmm11[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT:    vandps %ymm1, %ymm9, %ymm1
+; AVX1-NEXT:    vorps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7]
+; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm5[0],zero,xmm5[1],zero
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5,6,7]
+; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,6]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
-; AVX1-NEXT:    vmovdqa (%r8), %xmm4
-; AVX1-NEXT:    vandnps %ymm0, %ymm5, %ymm0
-; AVX1-NEXT:    vandps %ymm5, %ymm1, %ymm1
-; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3,4,5],xmm5[6],xmm1[7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm9[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm13[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3,4,5,6],xmm6[7]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm6 = zero,zero,xmm12[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm15[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm14[4],xmm6[5,6,7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm6 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX1-NEXT:    vandnps %ymm5, %ymm6, %ymm5
-; AVX1-NEXT:    vandps %ymm6, %ymm3, %ymm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[2,3,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3,4,5,6],xmm6[7]
-; AVX1-NEXT:    vorps %ymm5, %ymm3, %ymm3
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,2,4,5,6,7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm1, %ymm9, %ymm1
+; AVX1-NEXT:    vandps %ymm2, %ymm9, %ymm2
+; AVX1-NEXT:    vorps %ymm1, %ymm2, %ymm2
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[1,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3,4,5],xmm1[6],xmm3[7]
+; AVX1-NEXT:    vpsrlq $48, %xmm15, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm13[0,1,2,3],xmm0[4],xmm13[5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,1,3,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,1,1]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT:    vandnps %ymm2, %ymm9, %ymm2
+; AVX1-NEXT:    vandps %ymm0, %ymm9, %ymm0
+; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm12[0,1,0,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5,6,7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,1,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm3, %ymm3
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[0,1,6,7,u,u,u,u,8,9,4,5,10,11,u,u]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; AVX1-NEXT:    vandnps %ymm3, %ymm9, %ymm3
+; AVX1-NEXT:    vandps %ymm5, %ymm9, %ymm5
+; AVX1-NEXT:    vorps %ymm3, %ymm5, %ymm3
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1,2,3,4],xmm6[5],xmm5[6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm3, 32(%r9)
-; AVX1-NEXT:    vmovdqa %xmm5, 48(%r9)
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm14[0,1,2,3,7,6,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,2,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3,4,5],xmm6[6],xmm5[7]
+; AVX1-NEXT:    vpsrld $16, %xmm14, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5,6,7]
+; AVX1-NEXT:    vmovdqa %xmm3, 128(%r9)
+; AVX1-NEXT:    vmovdqa %xmm5, 144(%r9)
 ; AVX1-NEXT:    vmovdqa %xmm0, (%r9)
-; AVX1-NEXT:    vmovdqa %xmm1, 16(%r9)
-; AVX1-NEXT:    vmovdqa %xmm7, 96(%r9)
-; AVX1-NEXT:    vmovdqa %xmm11, 112(%r9)
-; AVX1-NEXT:    vmovdqa %xmm2, 64(%r9)
-; AVX1-NEXT:    vmovdqa %xmm10, 80(%r9)
-; AVX1-NEXT:    vmovdqa %xmm8, 128(%r9)
+; AVX1-NEXT:    vmovdqa %xmm2, 16(%r9)
+; AVX1-NEXT:    vmovdqa %xmm8, 64(%r9)
+; AVX1-NEXT:    vmovdqa %xmm1, 80(%r9)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vmovaps %xmm0, 96(%r9)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vmovaps %xmm0, 112(%r9)
+; AVX1-NEXT:    vmovdqa %xmm4, 32(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 144(%r9)
+; AVX1-NEXT:    vmovaps %xmm0, 48(%r9)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: vf16:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm12
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm11
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm3
 ; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm4
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm8
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm6
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm7
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm2
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm5
+; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm6
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm0, %ymm5, %ymm0
-; AVX2-SLOW-NEXT:    vpbroadcastq (%r8), %ymm5
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm0, %ymm5, %ymm5
-; AVX2-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm0
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm1, %ymm7, %ymm1
+; AVX2-SLOW-NEXT:    vpbroadcastq (%r8), %ymm8
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm1, %ymm8, %ymm9
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm5[1,2,2,2]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[1,2,2,2]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2],xmm1[3],xmm6[4,5],xmm1[6],xmm6[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,0]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm8[0,1,1,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm0, %ymm1, %ymm6
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm9[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm2[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,2,6,7,6,6]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = mem[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[3,3,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2],xmm1[3],xmm5[4],xmm1[5,6],xmm5[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpbroadcastq 8(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm8
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm2[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,u,u,24,25,20,21,u,u,24,25]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,2]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[3,2,3,3,7,6,7,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm4[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2],ymm1[3,4],ymm7[5,6,7,8],ymm1[9],ymm7[10],ymm1[11,12],ymm7[13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,3,2]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpbroadcastq 24(%r8), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[0,1,2,1,4,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm11[3,1,2,2,4,5,6,7,11,9,10,10,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0,1],ymm1[2],ymm5[3],ymm1[4],ymm5[5,6],ymm1[7],ymm5[8,9],ymm1[10],ymm5[11],ymm1[12],ymm5[13,14],ymm1[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[1,2,2,3,5,6,6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
 ; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[1,1,1,2,5,5,5,6]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3],ymm1[4],ymm7[5],ymm1[6],ymm7[7,8],ymm1[9],ymm7[10,11],ymm1[12],ymm7[13],ymm1[14],ymm7[15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2],ymm1[3],ymm5[4,5],ymm1[6],ymm5[7,8],ymm1[9],ymm5[10],ymm1[11],ymm5[12,13],ymm1[14],ymm5[15]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[0,1,2,1,4,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm2[3,1,2,2,4,5,6,7,11,9,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm10[0,1],ymm7[2],ymm10[3],ymm7[4],ymm10[5,6],ymm7[7],ymm10[8,9],ymm7[10],ymm10[11],ymm7[12],ymm10[13,14],ymm7[15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm1, %ymm7, %ymm1
-; AVX2-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm7
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm1, %ymm7, %ymm1
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm3[3,2,3,3,7,6,7,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm11[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,2,3,2,4,6,7,6]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3],ymm6[4],ymm5[5,6],ymm6[7],ymm5[8,9],ymm6[10],ymm5[11],ymm6[12],ymm5[13,14],ymm6[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm1, %ymm5, %ymm1
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm1, %ymm5, %ymm1
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[3,0,3,0,7,4,7,4]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm9[1,1,2,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5],ymm2[6],ymm4[7,8],ymm2[9],ymm4[10,11],ymm2[12],ymm4[13],ymm2[14],ymm4[15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm4, %ymm3, %ymm2, %ymm2
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm8[1,1,2,2]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm12[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10],ymm4[11],ymm2[12],ymm4[13],ymm2[14,15]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm11[0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm2, %ymm3, %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 64(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 96(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 128(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm6, 32(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm5, (%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 128(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 96(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 32(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm9, (%r9)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
-; AVX2-FAST-LABEL: vf16:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX2-FAST-NEXT:    vmovdqa (%rsi), %ymm10
-; AVX2-FAST-NEXT:    vmovdqa (%rdx), %ymm3
-; AVX2-FAST-NEXT:    vmovdqa (%rcx), %ymm4
-; AVX2-FAST-NEXT:    vmovdqa (%r8), %ymm8
-; AVX2-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm5
-; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm6
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm6[6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0],xmm5[1],xmm7[2,3],xmm5[4],xmm7[5],xmm5[6],xmm7[7]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[1,2,2,2]
-; AVX2-FAST-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,0]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm5, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm8[0,1,1,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm1, %ymm2, %ymm5
-; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpbroadcastq (%r8), %ymm1
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm6
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm9[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,30,31,u,u,26,27,u,u,30,31,28,29,u,u,28,29]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,2]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[3,2,3,3,7,6,7,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,u,u,30,31,u,u,u,u,28,29,30,31,30,31]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3,4],ymm2[5,6,7,8],ymm1[9],ymm2[10],ymm1[11,12],ymm2[13,14,15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,3,2]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u>
-; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT:    vpbroadcastq 24(%r8), %ymm1
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[1,1,1,2,5,5,5,6]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm9[0,1,2,1,4,5,6,5]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,18,19,u,u,20,21,u,u,24,25,24,25,u,u]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm7[0,1],ymm2[2],ymm7[3],ymm2[4],ymm7[5,6],ymm2[7],ymm7[8,9],ymm2[10],ymm7[11],ymm2[12],ymm7[13,14],ymm2[15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm2
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[3,0,3,0,7,4,7,4]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7,8],ymm2[9],ymm3[10],ymm2[11],ymm3[12,13],ymm2[14],ymm3[15]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm10[u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm9[1,1,2,2]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5],ymm3[6],ymm4[7,8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm8[1,1,2,2]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-NEXT:    vmovdqa %ymm2, 64(%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm1, 96(%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm0, 128(%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm6, (%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm5, 32(%r9)
-; AVX2-FAST-NEXT:    vzeroupper
-; AVX2-FAST-NEXT:    retq
+; AVX2-FAST-ALL-LABEL: vf16:
+; AVX2-FAST-ALL:       # %bb.0:
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm12
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm11
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm3
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %ymm4
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %ymm2
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm6
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %xmm0
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm10 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm7, %ymm1, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq (%r8), %ymm8
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm1, %ymm8, %ymm9
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[1,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2],xmm1[3],xmm5[4,5],xmm1[6],xmm5[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm5 = mem[2,1,2,3]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,6,7,u,u,10,11,u,u,u,u,8,9]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0,1],xmm0[2],xmm5[3],xmm0[4],xmm5[5,6],xmm0[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm1, %ymm0, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq 8(%rdi), %ymm1
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm8
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm0 = ymm2[0,1,0,1,4,5,4,5]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,u,u,24,25,20,21,u,u,24,25]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[0,1,2,1,4,5,6,5]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm5 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,18,19,u,u,20,21,u,u,24,25,24,25,u,u]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0,1],ymm1[2],ymm5[3],ymm1[4],ymm5[5,6],ymm1[7],ymm5[8,9],ymm1[10],ymm5[11],ymm1[12],ymm5[13,14],ymm1[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [25769803781,25769803781,25769803781,25769803781]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm3, %ymm1, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[2,3,2,3,6,7,6,7]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[2,3,2,3,6,7,6,7]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2],ymm1[3],ymm5[4,5],ymm1[6],ymm5[7,8],ymm1[9],ymm5[10],ymm1[11],ymm5[12,13],ymm1[14],ymm5[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm5 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,u,u,30,31,u,u,u,u,28,29]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm6 = ymm3[3,2,3,3,7,6,7,7]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3],ymm5[4],ymm6[5,6],ymm5[7],ymm6[8,9],ymm5[10],ymm6[11],ymm5[12],ymm6[13,14],ymm5[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm1, %ymm5, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm1, %ymm5, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[3,0,3,0,7,4,7,4]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm12[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10],ymm4[11],ymm2[12],ymm4[13],ymm2[14,15]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm3, %ymm2, %ymm2
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm3 = ymm11[0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm2, %ymm3, %ymm2
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm2, 64(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm1, 128(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, 96(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm8, 32(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm9, (%r9)
+; AVX2-FAST-ALL-NEXT:    vzeroupper
+; AVX2-FAST-ALL-NEXT:    retq
+;
+; AVX2-FAST-PERLANE-LABEL: vf16:
+; AVX2-FAST-PERLANE:       # %bb.0:
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm12
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm10 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm7, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq (%r8), %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm1, %ymm8, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[1,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2],xmm1[3],xmm5[4,5],xmm1[6],xmm5[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = mem[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,6,7,u,u,10,11,u,u,u,u,8,9]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0,1],xmm0[2],xmm5[3],xmm0[4],xmm5[5,6],xmm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm1, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq 8(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm0 = ymm2[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,u,u,24,25,20,21,u,u,24,25]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[0,1,2,1,4,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,18,19,u,u,20,21,u,u,24,25,24,25,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0,1],ymm1[2],ymm5[3],ymm1[4],ymm5[5,6],ymm1[7],ymm5[8,9],ymm1[10],ymm5[11],ymm1[12],ymm5[13,14],ymm1[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[1,2,2,3,5,6,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2],ymm1[3],ymm5[4,5],ymm1[6],ymm5[7,8],ymm1[9],ymm5[10],ymm1[11],ymm5[12,13],ymm1[14],ymm5[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,u,u,30,31,u,u,u,u,28,29]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm6 = ymm3[3,2,3,3,7,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3],ymm5[4],ymm6[5,6],ymm5[7],ymm6[8,9],ymm5[10],ymm6[11],ymm5[12],ymm6[13,14],ymm5[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm1, %ymm5, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm1, %ymm5, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[3,0,3,0,7,4,7,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm12[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10],ymm4[11],ymm2[12],ymm4[13],ymm2[14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm3, %ymm2, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = ymm11[0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm2, %ymm3, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, 64(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 128(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 96(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm8, 32(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm9, (%r9)
+; AVX2-FAST-PERLANE-NEXT:    vzeroupper
+; AVX2-FAST-PERLANE-NEXT:    retq
 ;
 ; AVX512-LABEL: vf16:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512-NEXT:    vmovdqa (%r8), %ymm2
-; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
-; AVX512-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,13,29,45,61,u,14,30,46,62,u,15,31,47,63,u>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm0, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,16,32,48,u,1,17,33,49,u,2,18,34,50,u,3,19,35,51,u,4,20,36,52,u,5,21,37,53,u,6,22>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm0, %zmm4
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,32,5,6,7,8,33,10,11,12,13,34,15,16,17,18,35,20,21,22,23,36,25,26,27,28,37,30,31]
-; AVX512-NEXT:    vpermi2w %zmm2, %zmm4, %zmm5
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <6,22,u,39,55,7,23,u,40,56,8,24,u,41,57,9,25,u,42,58,10,26,u,43,59,11,27,u,44,60,12,28>
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm1, %zmm4
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,38,3,4,5,6,39,8,9,10,11,40,13,14,15,16,41,18,19,20,21,42,23,24,25,26,43,28,29,30,31]
-; AVX512-NEXT:    vpermi2w %zmm2, %zmm4, %zmm0
-; AVX512-NEXT:    vmovdqu64 %zmm0, 64(%r9)
-; AVX512-NEXT:    vmovdqu64 %zmm5, (%r9)
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm0 = [28,1,2,3,4,29,6,7,8,9,30,11,12,13,14,31]
-; AVX512-NEXT:    vpermi2w %ymm2, %ymm3, %ymm0
+; AVX512-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512-NEXT:    vmovdqa (%rdx), %ymm2
+; AVX512-NEXT:    vmovdqa (%rcx), %ymm3
+; AVX512-NEXT:    vmovdqa (%r8), %ymm4
+; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm5
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm6
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,16,32,48,u,1,17,33,49,u,2,18,34,50,u,3,19,35,51,u,4,20,36,52,u,5,21,37,53,u,6,22>
+; AVX512-NEXT:    vpermi2w %zmm5, %zmm6, %zmm7
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,32,5,6,7,8,33,10,11,12,13,34,15,16,17,18,35,20,21,22,23,36,25,26,27,28,37,30,31]
+; AVX512-NEXT:    vpermi2w %zmm4, %zmm7, %zmm6
+; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm7
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <6,22,38,55,u,7,23,39,56,u,8,24,40,57,u,9,25,41,58,u,10,26,42,59,u,11,27,43,60,u,12,28>
+; AVX512-NEXT:    vpermi2w %zmm7, %zmm5, %zmm8
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,39,5,6,7,8,40,10,11,12,13,41,15,16,17,18,42,20,21,22,23,43,25,26,27,28,44,30,31]
+; AVX512-NEXT:    vpermi2w %zmm1, %zmm8, %zmm5
+; AVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [14,0,29,13,31,15,0,30,14,0,29,13,31,15,0,30]
+; AVX512-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512-NEXT:    vpermi2w %ymm1, %ymm2, %ymm7
+; AVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [12,29,14,31,0,13,30,15,12,29,14,31,0,13,30,15]
+; AVX512-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512-NEXT:    vpermi2w %ymm0, %ymm4, %ymm1
+; AVX512-NEXT:    movw $12684, %ax # imm = 0x318C
+; AVX512-NEXT:    kmovd %eax, %k1
+; AVX512-NEXT:    vmovdqu16 %ymm7, %ymm1 {%k1}
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,1,2,3,29,5,6,7,8,30,10,11,12,13,31,15]
+; AVX512-NEXT:    vpermi2w %ymm3, %ymm1, %ymm0
 ; AVX512-NEXT:    vmovdqa %ymm0, 128(%r9)
+; AVX512-NEXT:    vmovdqu64 %zmm5, 64(%r9)
+; AVX512-NEXT:    vmovdqu64 %zmm6, (%r9)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <16 x i16>, <16 x i16>* %in.vecptr0, align 32
@@ -1051,1039 +1229,1222 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
 define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>* %in.vecptr2, <32 x i16>* %in.vecptr3, <32 x i16>* %in.vecptr4, <160 x i16>* %out.vec) nounwind {
 ; SSE-LABEL: vf32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    subq $248, %rsp
-; SSE-NEXT:    movdqa (%rdi), %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 16(%rdi), %xmm10
+; SSE-NEXT:    subq $328, %rsp # imm = 0x148
+; SSE-NEXT:    movdqa (%rdi), %xmm10
 ; SSE-NEXT:    movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa (%rsi), %xmm13
-; SSE-NEXT:    movdqa 16(%rsi), %xmm9
-; SSE-NEXT:    movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa (%rsi), %xmm5
+; SSE-NEXT:    movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    movdqa (%rdx), %xmm14
-; SSE-NEXT:    movdqa (%rcx), %xmm11
-; SSE-NEXT:    movdqa 16(%rcx), %xmm12
-; SSE-NEXT:    movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa (%r8), %xmm2
-; SSE-NEXT:    movdqa {{.*#+}} xmm15 = [65535,65535,65535,65535,0,65535,65535,65535]
-; SSE-NEXT:    movdqa %xmm15, %xmm1
-; SSE-NEXT:    pandn %xmm0, %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm13[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pand %xmm15, %xmm3
-; SSE-NEXT:    por %xmm1, %xmm3
-; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,0,0,65535,65535]
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    pandn %xmm3, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm14[1,1,2,2]
-; SSE-NEXT:    movdqa {{.*#+}} xmm8 = [65535,65535,0,65535,65535,65535,65535,0]
-; SSE-NEXT:    pand %xmm8, %xmm5
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm11[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm8, %xmm7
+; SSE-NEXT:    movdqa 16(%rdx), %xmm9
+; SSE-NEXT:    movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa (%rcx), %xmm2
+; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 16(%rcx), %xmm8
+; SSE-NEXT:    movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa (%r8), %xmm1
+; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 16(%r8), %xmm3
+; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE-NEXT:    movdqa %xmm4, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm4, %xmm1
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,65535,0,0,65535,65535,65535,0]
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm14[1,1,2,2]
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT:    movdqa %xmm1, %xmm7
 ; SSE-NEXT:    pandn %xmm6, %xmm7
-; SSE-NEXT:    por %xmm5, %xmm7
-; SSE-NEXT:    pand %xmm1, %xmm7
-; SSE-NEXT:    por %xmm4, %xmm7
-; SSE-NEXT:    movdqa {{.*#+}} xmm6 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT:    pand %xmm6, %xmm7
-; SSE-NEXT:    movdqa %xmm6, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    movdqa %xmm2, %xmm3
-; SSE-NEXT:    por %xmm7, %xmm0
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm5[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm1, %xmm6
+; SSE-NEXT:    por %xmm7, %xmm6
+; SSE-NEXT:    pand %xmm2, %xmm6
+; SSE-NEXT:    por %xmm0, %xmm6
+; SSE-NEXT:    movdqa {{.*#+}} xmm12 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT:    pand %xmm12, %xmm6
+; SSE-NEXT:    movdqa %xmm12, %xmm0
+; SSE-NEXT:    pandn %xmm10, %xmm0
+; SSE-NEXT:    por %xmm6, %xmm0
 ; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm15, %xmm4
-; SSE-NEXT:    pandn %xmm10, %xmm4
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm9[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pand %xmm15, %xmm5
-; SSE-NEXT:    por %xmm4, %xmm5
+; SSE-NEXT:    movdqa %xmm4, %xmm0
+; SSE-NEXT:    pandn %xmm3, %xmm0
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm8[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm4, %xmm6
+; SSE-NEXT:    por %xmm0, %xmm6
+; SSE-NEXT:    movdqa %xmm2, %xmm11
+; SSE-NEXT:    pandn %xmm6, %xmm11
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm9[1,1,2,2]
 ; SSE-NEXT:    movdqa %xmm1, %xmm7
-; SSE-NEXT:    pandn %xmm5, %xmm7
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm12[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm8, %xmm5
-; SSE-NEXT:    pandn %xmm4, %xmm5
-; SSE-NEXT:    movdqa 16(%rdx), %xmm10
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[1,1,2,2]
-; SSE-NEXT:    pand %xmm8, %xmm0
-; SSE-NEXT:    por %xmm0, %xmm5
-; SSE-NEXT:    pand %xmm1, %xmm5
-; SSE-NEXT:    por %xmm7, %xmm5
-; SSE-NEXT:    movdqa 16(%r8), %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pand %xmm6, %xmm5
-; SSE-NEXT:    movdqa %xmm6, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    pandn %xmm6, %xmm7
+; SSE-NEXT:    movdqa 16(%rsi), %xmm0
 ; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 32(%rdi), %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm15, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    movdqa 32(%rsi), %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm2[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pand %xmm15, %xmm5
-; SSE-NEXT:    por %xmm0, %xmm5
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    pandn %xmm5, %xmm0
-; SSE-NEXT:    movdqa 32(%rcx), %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm2[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm8, %xmm7
-; SSE-NEXT:    pandn %xmm5, %xmm7
-; SSE-NEXT:    movdqa 32(%rdx), %xmm12
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm12[1,1,2,2]
-; SSE-NEXT:    pand %xmm8, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm7
-; SSE-NEXT:    pand %xmm1, %xmm7
-; SSE-NEXT:    por %xmm0, %xmm7
-; SSE-NEXT:    pand %xmm6, %xmm7
-; SSE-NEXT:    movdqa 32(%r8), %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm6, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    por %xmm7, %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 48(%rdi), %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm15, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    movdqa 48(%rsi), %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm2[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm11, %xmm0
+; SSE-NEXT:    movdqa 16(%rdi), %xmm5
+; SSE-NEXT:    movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    movdqa %xmm12, %xmm3
+; SSE-NEXT:    pandn %xmm5, %xmm3
+; SSE-NEXT:    por %xmm0, %xmm3
+; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 32(%r8), %xmm3
+; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm4, %xmm0
+; SSE-NEXT:    pandn %xmm3, %xmm0
+; SSE-NEXT:    movdqa 32(%rcx), %xmm3
+; SSE-NEXT:    movdqa %xmm3, (%rsp) # 16-byte Spill
+; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm3[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm4, %xmm7
+; SSE-NEXT:    por %xmm0, %xmm7
+; SSE-NEXT:    movdqa %xmm2, %xmm13
+; SSE-NEXT:    pandn %xmm7, %xmm13
+; SSE-NEXT:    movdqa 32(%rdx), %xmm15
+; SSE-NEXT:    pshufd {{.*#+}} xmm11 = xmm15[1,1,2,2]
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    pandn %xmm11, %xmm0
+; SSE-NEXT:    movdqa 32(%rsi), %xmm11
+; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm11[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pand %xmm15, %xmm5
+; SSE-NEXT:    pand %xmm1, %xmm5
 ; SSE-NEXT:    por %xmm0, %xmm5
+; SSE-NEXT:    pand %xmm2, %xmm5
+; SSE-NEXT:    por %xmm13, %xmm5
+; SSE-NEXT:    pand %xmm12, %xmm5
+; SSE-NEXT:    movdqa 32(%rdi), %xmm9
+; SSE-NEXT:    movdqa %xmm12, %xmm0
+; SSE-NEXT:    pandn %xmm9, %xmm0
+; SSE-NEXT:    movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    movdqa 48(%rcx), %xmm0
 ; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
 ; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm8, %xmm7
-; SSE-NEXT:    pandn %xmm0, %xmm7
-; SSE-NEXT:    movdqa 48(%rdx), %xmm0
+; SSE-NEXT:    pand %xmm4, %xmm0
+; SSE-NEXT:    movdqa 48(%r8), %xmm3
+; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pandn %xmm3, %xmm4
+; SSE-NEXT:    por %xmm0, %xmm4
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    pandn %xmm4, %xmm0
+; SSE-NEXT:    movdqa 48(%rsi), %xmm10
+; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm10[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm1, %xmm4
+; SSE-NEXT:    movdqa 48(%rdx), %xmm8
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm8[1,1,2,2]
+; SSE-NEXT:    movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pandn %xmm5, %xmm1
+; SSE-NEXT:    por %xmm4, %xmm1
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    pand %xmm12, %xmm1
+; SSE-NEXT:    movdqa 48(%rdi), %xmm7
+; SSE-NEXT:    movdqa %xmm12, %xmm0
+; SSE-NEXT:    pandn %xmm7, %xmm0
+; SSE-NEXT:    movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    por %xmm1, %xmm0
 ; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
-; SSE-NEXT:    pand %xmm8, %xmm0
-; SSE-NEXT:    por %xmm0, %xmm7
-; SSE-NEXT:    pand %xmm1, %xmm7
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    pandn %xmm0, %xmm4
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm4, %xmm0
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT:    movdqa %xmm3, %xmm4
+; SSE-NEXT:    psrld $16, %xmm4
+; SSE-NEXT:    movdqa %xmm12, %xmm1
+; SSE-NEXT:    pandn %xmm4, %xmm1
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm2, %xmm5
+; SSE-NEXT:    pandn %xmm0, %xmm5
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT:    punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT:    psrld $16, %xmm5
+; SSE-NEXT:    movdqa %xmm12, %xmm1
 ; SSE-NEXT:    pandn %xmm5, %xmm1
-; SSE-NEXT:    por %xmm7, %xmm1
-; SSE-NEXT:    pand %xmm6, %xmm1
-; SSE-NEXT:    movdqa 48(%r8), %xmm0
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm2, %xmm5
+; SSE-NEXT:    pandn %xmm0, %xmm5
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
 ; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pandn %xmm0, %xmm6
-; SSE-NEXT:    por %xmm1, %xmm6
-; SSE-NEXT:    movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    movdqa (%rsp), %xmm5 # 16-byte Reload
+; SSE-NEXT:    psrld $16, %xmm5
+; SSE-NEXT:    movdqa %xmm12, %xmm1
+; SSE-NEXT:    pandn %xmm5, %xmm1
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm2, %xmm5
+; SSE-NEXT:    pandn %xmm0, %xmm5
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT:    psrld $16, %xmm5
+; SSE-NEXT:    movdqa %xmm12, %xmm1
+; SSE-NEXT:    pandn %xmm5, %xmm1
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    movdqa %xmm14, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT:    movdqa %xmm1, %xmm5
+; SSE-NEXT:    movdqa %xmm2, %xmm5
 ; SSE-NEXT:    pandn %xmm0, %xmm5
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm6, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm0[0,1,3,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[0,1,1,1]
-; SSE-NEXT:    pand %xmm1, %xmm7
-; SSE-NEXT:    por %xmm5, %xmm7
-; SSE-NEXT:    pand %xmm15, %xmm7
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1]
-; SSE-NEXT:    movdqa %xmm15, %xmm4
-; SSE-NEXT:    pandn %xmm2, %xmm4
-; SSE-NEXT:    por %xmm7, %xmm4
-; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm11, %xmm5
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT:    movdqa %xmm4, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm4, %xmm7
-; SSE-NEXT:    por %xmm7, %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm11[4],xmm14[5],xmm11[5],xmm14[6],xmm11[6],xmm14[7],xmm11[7]
-; SSE-NEXT:    movdqa %xmm14, %xmm2
-; SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT:    movdqa %xmm0, %xmm7
-; SSE-NEXT:    pandn %xmm2, %xmm7
-; SSE-NEXT:    movdqa %xmm6, %xmm2
-; SSE-NEXT:    movdqa %xmm13, %xmm5
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm13[4],xmm2[5],xmm13[5],xmm2[6],xmm13[6],xmm2[7],xmm13[7]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,2,3,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    por %xmm7, %xmm2
-; SSE-NEXT:    movdqa {{.*#+}} xmm13 = [0,65535,65535,65535,65535,0,65535,65535]
-; SSE-NEXT:    pand %xmm13, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm11 = xmm3[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm13, %xmm3
-; SSE-NEXT:    pandn %xmm11, %xmm3
-; SSE-NEXT:    por %xmm2, %xmm3
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm13[0,1,3,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm14[1,1,1,1]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm3[4],xmm14[5],xmm3[5],xmm14[6],xmm3[6],xmm14[7],xmm3[7]
+; SSE-NEXT:    movdqa %xmm3, %xmm5
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm12, %xmm3
+; SSE-NEXT:    pandn %xmm1, %xmm3
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm3
 ; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm5, %xmm2
-; SSE-NEXT:    psrlq $48, %xmm2
-; SSE-NEXT:    movdqa %xmm6, %xmm3
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    pandn %xmm3, %xmm2
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm14[0,1,2,3,4,5,7,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,1,3,3]
-; SSE-NEXT:    pand %xmm0, %xmm6
-; SSE-NEXT:    por %xmm2, %xmm6
-; SSE-NEXT:    movdqa %xmm8, %xmm2
-; SSE-NEXT:    pandn %xmm11, %xmm2
-; SSE-NEXT:    pand %xmm8, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm10, %xmm2
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT:    movdqa %xmm1, %xmm6
-; SSE-NEXT:    pandn %xmm2, %xmm6
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm9, %xmm2
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,4,5,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm1
+; SSE-NEXT:    pandn %xmm0, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    movdqa %xmm12, %xmm1
+; SSE-NEXT:    pandn %xmm9, %xmm1
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,7,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm1
+; SSE-NEXT:    pandn %xmm0, %xmm1
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,4,5,7,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    movdqa %xmm8, %xmm1
+; SSE-NEXT:    psrlq $48, %xmm1
+; SSE-NEXT:    movdqa %xmm12, %xmm3
+; SSE-NEXT:    pandn %xmm1, %xmm3
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm3
+; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[0,1,3,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,1,1,1]
-; SSE-NEXT:    pand %xmm1, %xmm4
-; SSE-NEXT:    por %xmm6, %xmm4
-; SSE-NEXT:    pand %xmm15, %xmm4
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm11[0,1,0,1]
-; SSE-NEXT:    movdqa %xmm15, %xmm3
-; SSE-NEXT:    pandn %xmm6, %xmm3
-; SSE-NEXT:    por %xmm4, %xmm3
-; SSE-NEXT:    movdqa %xmm3, (%rsp) # 16-byte Spill
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm7, %xmm4
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[2,2,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm14 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT:    movdqa %xmm14, %xmm2
-; SSE-NEXT:    pandn %xmm6, %xmm2
-; SSE-NEXT:    pand %xmm14, %xmm4
-; SSE-NEXT:    por %xmm4, %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm7[4],xmm10[5],xmm7[5],xmm10[6],xmm7[6],xmm10[7],xmm7[7]
-; SSE-NEXT:    movdqa %xmm10, %xmm2
-; SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    pandn %xmm2, %xmm4
-; SSE-NEXT:    movdqa %xmm9, %xmm3
-; SSE-NEXT:    movdqa %xmm9, %xmm2
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,2,3,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    por %xmm4, %xmm2
-; SSE-NEXT:    pand %xmm13, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm11[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm13, %xmm6
-; SSE-NEXT:    pandn %xmm4, %xmm6
-; SSE-NEXT:    por %xmm2, %xmm6
-; SSE-NEXT:    movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm5, %xmm2
-; SSE-NEXT:    psrlq $48, %xmm2
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    pandn %xmm3, %xmm2
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm10[0,1,2,3,4,5,7,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,1,3,3]
-; SSE-NEXT:    pand %xmm0, %xmm6
-; SSE-NEXT:    por %xmm2, %xmm6
-; SSE-NEXT:    movdqa %xmm8, %xmm2
-; SSE-NEXT:    pandn %xmm4, %xmm2
-; SSE-NEXT:    pand %xmm8, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm12, %xmm2
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    pandn %xmm0, %xmm3
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm8[0,1,3,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm3, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm10 = xmm1[1,1,1,1]
+; SSE-NEXT:    movdqa %xmm5, %xmm3
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
 ; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    pandn %xmm2, %xmm4
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm9, %xmm2
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm12, %xmm11
+; SSE-NEXT:    pandn %xmm6, %xmm11
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm11
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,5,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm6
+; SSE-NEXT:    pandn %xmm0, %xmm6
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm6, %xmm0
+; SSE-NEXT:    movdqa %xmm12, %xmm9
+; SSE-NEXT:    pandn %xmm10, %xmm9
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm9
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,4,7,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    pandn %xmm0, %xmm4
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,7,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm4, %xmm0
+; SSE-NEXT:    movdqa %xmm7, %xmm1
+; SSE-NEXT:    psrlq $48, %xmm1
+; SSE-NEXT:    movdqa %xmm12, %xmm10
+; SSE-NEXT:    pandn %xmm1, %xmm10
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm10
+; SSE-NEXT:    movdqa %xmm15, %xmm5
+; SSE-NEXT:    movdqa %xmm15, %xmm0
+; SSE-NEXT:    movdqa (%rsp), %xmm3 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    pandn %xmm0, %xmm4
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm7[0,1,3,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm4, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm15 = xmm15[1,1,1,1]
+; SSE-NEXT:    movdqa %xmm3, %xmm4
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
 ; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm2[0,1,3,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,1,1]
-; SSE-NEXT:    pand %xmm1, %xmm6
-; SSE-NEXT:    por %xmm4, %xmm6
-; SSE-NEXT:    pand %xmm15, %xmm6
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm10[0,1,0,1]
-; SSE-NEXT:    movdqa %xmm15, %xmm7
-; SSE-NEXT:    pandn %xmm4, %xmm7
-; SSE-NEXT:    por %xmm6, %xmm7
-; SSE-NEXT:    movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm5, %xmm6
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[2,2,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
-; SSE-NEXT:    movdqa %xmm14, %xmm11
-; SSE-NEXT:    pandn %xmm4, %xmm11
-; SSE-NEXT:    pand %xmm14, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm11
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm12, %xmm6
+; SSE-NEXT:    pandn %xmm3, %xmm6
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm6
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,4,5,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    pandn %xmm0, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm3, %xmm0
 ; SSE-NEXT:    movdqa %xmm12, %xmm4
-; SSE-NEXT:    movdqa %xmm12, %xmm5
-; SSE-NEXT:    pslldq {{.*#+}} xmm4 = zero,zero,xmm4[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT:    movdqa %xmm0, %xmm6
-; SSE-NEXT:    pandn %xmm4, %xmm6
-; SSE-NEXT:    movdqa %xmm9, %xmm4
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,2,3,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm4
-; SSE-NEXT:    por %xmm6, %xmm4
-; SSE-NEXT:    pand %xmm13, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm10[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm13, %xmm12
-; SSE-NEXT:    pandn %xmm6, %xmm12
-; SSE-NEXT:    por %xmm4, %xmm12
-; SSE-NEXT:    psrlq $48, %xmm3
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm3[1]
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    pandn %xmm9, %xmm4
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,4,5,7,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    por %xmm4, %xmm2
-; SSE-NEXT:    movdqa %xmm8, %xmm9
-; SSE-NEXT:    pandn %xmm6, %xmm9
-; SSE-NEXT:    pand %xmm8, %xmm2
-; SSE-NEXT:    por %xmm2, %xmm9
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm3, %xmm2
+; SSE-NEXT:    pandn %xmm15, %xmm4
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm4
+; SSE-NEXT:    pshufhw $172, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm0 = mem[0,1,2,3,4,7,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    pandn %xmm0, %xmm3
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,4,5,7,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm3, %xmm0
+; SSE-NEXT:    psrlq $48, %xmm1
+; SSE-NEXT:    movdqa %xmm12, %xmm15
+; SSE-NEXT:    pandn %xmm1, %xmm15
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm15
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm10, %xmm4
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm4[0,1,3,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,1,1]
-; SSE-NEXT:    pand %xmm1, %xmm6
-; SSE-NEXT:    pandn %xmm2, %xmm1
-; SSE-NEXT:    por %xmm6, %xmm1
-; SSE-NEXT:    pand %xmm15, %xmm1
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    pandn %xmm0, %xmm3
 ; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[0,1,0,1]
-; SSE-NEXT:    pandn %xmm2, %xmm15
-; SSE-NEXT:    por %xmm1, %xmm15
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,5,6,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE-NEXT:    movdqa %xmm14, %xmm4
-; SSE-NEXT:    movdqa %xmm3, %xmm6
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[2,2,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT:    pand %xmm3, %xmm4
-; SSE-NEXT:    pandn %xmm2, %xmm3
-; SSE-NEXT:    por %xmm4, %xmm3
-; SSE-NEXT:    movdqa %xmm6, %xmm2
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
-; SSE-NEXT:    movdqa %xmm2, %xmm1
-; SSE-NEXT:    movdqa %xmm2, %xmm14
-; SSE-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    pandn %xmm1, %xmm2
-; SSE-NEXT:    movdqa %xmm10, %xmm1
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,3,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm1
-; SSE-NEXT:    por %xmm2, %xmm1
-; SSE-NEXT:    pand %xmm13, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
-; SSE-NEXT:    pandn %xmm2, %xmm13
-; SSE-NEXT:    por %xmm1, %xmm13
-; SSE-NEXT:    psrlq $48, %xmm7
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm10 = xmm10[1],xmm7[1]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm14[0,1,2,3,4,5,7,6]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
-; SSE-NEXT:    pand %xmm0, %xmm1
-; SSE-NEXT:    pandn %xmm10, %xmm0
-; SSE-NEXT:    por %xmm1, %xmm0
-; SSE-NEXT:    pand %xmm8, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm8
-; SSE-NEXT:    por %xmm0, %xmm8
-; SSE-NEXT:    movdqa %xmm8, 304(%r9)
-; SSE-NEXT:    movdqa %xmm13, 288(%r9)
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm5[0,1,3,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm3, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm13 = xmm1[1,1,1,1]
+; SSE-NEXT:    movdqa %xmm14, %xmm3
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm12, %xmm14
+; SSE-NEXT:    pandn %xmm7, %xmm14
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm14
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,4,5,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm7
+; SSE-NEXT:    pandn %xmm0, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    por %xmm7, %xmm0
+; SSE-NEXT:    movdqa %xmm12, %xmm3
+; SSE-NEXT:    pandn %xmm13, %xmm3
+; SSE-NEXT:    pand %xmm12, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm3
+; SSE-NEXT:    pshufhw $172, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm0 = mem[0,1,2,3,4,7,6,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,5,7,6]
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[2,1,3,3]
+; SSE-NEXT:    pand %xmm2, %xmm7
+; SSE-NEXT:    pandn %xmm0, %xmm2
+; SSE-NEXT:    por %xmm7, %xmm2
+; SSE-NEXT:    pand %xmm12, %xmm2
+; SSE-NEXT:    psrlq $48, %xmm8
+; SSE-NEXT:    pandn %xmm8, %xmm12
+; SSE-NEXT:    por %xmm2, %xmm12
+; SSE-NEXT:    movdqa %xmm12, 304(%r9)
 ; SSE-NEXT:    movdqa %xmm3, 256(%r9)
-; SSE-NEXT:    movdqa %xmm15, 240(%r9)
-; SSE-NEXT:    movdqa %xmm9, 224(%r9)
-; SSE-NEXT:    movdqa %xmm12, 208(%r9)
-; SSE-NEXT:    movdqa %xmm11, 176(%r9)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 160(%r9)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 144(%r9)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 128(%r9)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 96(%r9)
-; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 80(%r9)
+; SSE-NEXT:    movdqa %xmm14, 240(%r9)
+; SSE-NEXT:    movdqa %xmm15, 224(%r9)
+; SSE-NEXT:    movdqa %xmm4, 176(%r9)
+; SSE-NEXT:    movdqa %xmm6, 160(%r9)
+; SSE-NEXT:    movdqa %xmm10, 144(%r9)
+; SSE-NEXT:    movdqa %xmm9, 96(%r9)
+; SSE-NEXT:    movdqa %xmm11, 80(%r9)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 64(%r9)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 48(%r9)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 16(%r9)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, (%r9)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 288(%r9)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 272(%r9)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 208(%r9)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 192(%r9)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 128(%r9)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 112(%r9)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 48(%r9)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 32(%r9)
-; SSE-NEXT:    addq $248, %rsp
+; SSE-NEXT:    addq $328, %rsp # imm = 0x148
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $72, %rsp
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm11
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX1-NEXT:    vmovdqa 32(%rsi), %xmm15
-; AVX1-NEXT:    vmovdqa 48(%rsi), %xmm5
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm5[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm13[0,1,2,3,4,5,6,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    subq $152, %rsp
+; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm14
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm9
+; AVX1-NEXT:    vmovdqa 32(%r8), %xmm8
+; AVX1-NEXT:    vmovdqa 48(%r8), %xmm10
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,2,4,5,6,7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    vmovaps {{.*#+}} ymm12 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX1-NEXT:    vandnps %ymm0, %ymm12, %ymm1
-; AVX1-NEXT:    vmovdqa 32(%rdx), %xmm9
-; AVX1-NEXT:    vmovdqa 48(%rdx), %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,2]
-; AVX1-NEXT:    vmovdqa 32(%rcx), %xmm6
-; AVX1-NEXT:    vmovdqa 48(%rcx), %xmm7
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm7[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4,5,6],xmm4[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[2,2,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vandps %ymm2, %ymm12, %ymm2
-; AVX1-NEXT:    vorps %ymm1, %ymm2, %ymm10
-; AVX1-NEXT:    vmovdqa 48(%r8), %xmm1
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm8 = zero,zero,xmm4[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm8, %ymm4
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm14 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535]
-; AVX1-NEXT:    vandnps %ymm4, %ymm14, %ymm4
-; AVX1-NEXT:    vpsrlq $48, %xmm5, %xmm2
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm2 = xmm3[1],xmm2[1]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vandps %ymm2, %ymm14, %ymm2
-; AVX1-NEXT:    vorps %ymm4, %ymm2, %ymm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0],xmm2[1,2,3,4],xmm3[5],xmm2[6,7]
+; AVX1-NEXT:    vandnps %ymm0, %ymm12, %ymm2
+; AVX1-NEXT:    vmovdqa 32(%rcx), %xmm0
+; AVX1-NEXT:    vmovdqa 48(%rcx), %xmm11
+; AVX1-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX1-NEXT:    vmovdqa 48(%rdx), %xmm13
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,1,3,3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,0,2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
+; AVX1-NEXT:    vandps %ymm5, %ymm12, %ymm5
+; AVX1-NEXT:    vorps %ymm2, %ymm5, %ymm5
+; AVX1-NEXT:    vmovdqa 32(%rsi), %xmm2
+; AVX1-NEXT:    vmovdqa 48(%rsi), %xmm1
+; AVX1-NEXT:    vpsrlq $48, %xmm2, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm6[4],xmm5[5,6,7]
 ; AVX1-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5,6],xmm3[7]
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5,6],xmm3[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm9[4],xmm6[4],xmm9[5],xmm6[5],xmm9[6],xmm6[6],xmm9[7],xmm6[7]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm4 = zero,zero,xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm15[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm11[4],xmm4[5,6,7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm5 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX1-NEXT:    vandnps %ymm2, %ymm5, %ymm2
-; AVX1-NEXT:    vandps %ymm5, %ymm4, %ymm4
-; AVX1-NEXT:    vorps %ymm2, %ymm4, %ymm5
-; AVX1-NEXT:    vpsrlq $48, %xmm15, %xmm2
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm2 = xmm11[1],xmm2[1]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm13[0,1,3,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; AVX1-NEXT:    vmovdqa 32(%r8), %xmm4
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm5
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm1[1,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm6[1],xmm5[2,3,4,5],xmm6[6],xmm5[7]
+; AVX1-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3],xmm5[4,5,6,7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,2,4,5,6,7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
+; AVX1-NEXT:    vandnps %ymm5, %ymm12, %ymm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm2[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2,3,4,5],xmm6[6],xmm7[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
+; AVX1-NEXT:    vandps %ymm6, %ymm12, %ymm6
+; AVX1-NEXT:    vorps %ymm5, %ymm6, %ymm5
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm14[2,3,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm7[1],xmm6[2,3,4,5],xmm7[6],xmm6[7]
+; AVX1-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm1[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm9[4],xmm6[5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
+; AVX1-NEXT:    vmovdqa %xmm11, (%rsp) # 16-byte Spill
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm11[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm10[3],xmm7[4,5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,5,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm4, %ymm4
+; AVX1-NEXT:    vandnps %ymm6, %ymm12, %ymm6
+; AVX1-NEXT:    vandps %ymm4, %ymm12, %ymm4
+; AVX1-NEXT:    vorps %ymm6, %ymm4, %ymm4
+; AVX1-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm13[0],zero,xmm13[1],zero
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5,6,7]
+; AVX1-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm13[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3,4,5],xmm6[6],xmm4[7]
+; AVX1-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[0,1,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm1[3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm3, (%rsp) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm10[0],xmm1[1],xmm10[2,3,4,5],xmm1[6],xmm10[7]
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vandnps %ymm2, %ymm14, %ymm2
-; AVX1-NEXT:    vandps %ymm0, %ymm14, %ymm0
-; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[2,3,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3,4],xmm3[5],xmm2[6,7]
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1],xmm3[2],xmm0[3,4,5,6],xmm3[7]
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm9
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm14[4],xmm5[5,6,7]
+; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm14[0],xmm2[0],xmm14[1],xmm2[1],xmm14[2],xmm2[2],xmm14[3],xmm2[3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm1[0,1,3,2,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,1,1]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,6]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm6
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2],xmm4[3],xmm5[4,5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa (%rcx), %xmm4
 ; AVX1-NEXT:    vandnps %ymm0, %ymm12, %ymm0
 ; AVX1-NEXT:    vandps %ymm1, %ymm12, %ymm1
 ; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm4[0,1,0,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm8[0,1,0,1]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6,7]
 ; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7]
 ; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm9[4],xmm6[4],xmm9[5],xmm6[5],xmm9[6],xmm6[6],xmm9[7],xmm6[7]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm0
-; AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm4
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm4 = xmm1[1],xmm4[1]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vandnps %ymm2, %ymm14, %ymm2
-; AVX1-NEXT:    vandps %ymm4, %ymm14, %ymm4
-; AVX1-NEXT:    vorps %ymm2, %ymm4, %ymm2
-; AVX1-NEXT:    vmovdqa 16(%r8), %xmm8
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm8[2,3,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm2[1,2,3,4],xmm4[5],xmm2[6,7]
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4,5,6],xmm4[7]
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm10
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm13
-; AVX1-NEXT:    vpsrlq $48, %xmm13, %xmm2
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm2 = xmm10[1],xmm2[1]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm7[0,1,3,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX1-NEXT:    vmovdqa (%rcx), %xmm4
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm12[0,1,2,3,4,5,7,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[2,1,3,3]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[0,1,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm15, %ymm5
-; AVX1-NEXT:    vandnps %ymm2, %ymm14, %ymm2
-; AVX1-NEXT:    vandps %ymm5, %ymm14, %ymm5
-; AVX1-NEXT:    vorps %ymm2, %ymm5, %ymm5
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[3,3,3,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,4,5,6,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vmovdqa (%r8), %xmm10
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm10[3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm8
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm8[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3,4,5],xmm5[6],xmm2[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX1-NEXT:    vandnps %ymm1, %ymm12, %ymm1
+; AVX1-NEXT:    vandps %ymm2, %ymm12, %ymm2
+; AVX1-NEXT:    vorps %ymm1, %ymm2, %ymm14
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm14, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm5[1],xmm1[2,3,4,5],xmm5[6],xmm1[7]
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm5
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[3,3,3,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3,4,5,6],xmm1[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
-; AVX1-NEXT:    vmovaps %ymm2, %ymm14
-; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm8[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm11 = xmm2[0],xmm1[1],xmm2[2,3,4,5],xmm1[6],xmm2[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm13
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm13[4],xmm1[5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm6, %ymm1
+; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm9
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm9[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vmovdqa 16(%r8), %xmm11
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm11[3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm15[1,1,2,3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm7, %ymm0
+; AVX1-NEXT:    vandnps %ymm1, %ymm12, %ymm1
+; AVX1-NEXT:    vandps %ymm0, %ymm12, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm1
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm15 = xmm0[0,1,2,3],xmm7[4],xmm0[5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1],xmm0[2,3,4,5],xmm7[6],xmm0[7]
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm10[4],xmm2[4],xmm10[5],xmm2[5],xmm10[6],xmm2[6],xmm10[7],xmm2[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,2,4,5,6,7]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm0
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,7,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,3,3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,2,2,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,0,2,1]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,3]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,4,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm7[0,1,3,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,1]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,6,6]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm0
-; AVX1-NEXT:    vandnps %ymm6, %ymm14, %ymm6
-; AVX1-NEXT:    vandps %ymm0, %ymm14, %ymm0
-; AVX1-NEXT:    vorps %ymm6, %ymm0, %ymm0
-; AVX1-NEXT:    vmovdqa (%r8), %xmm6
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm7[4],xmm0[5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
+; AVX1-NEXT:    vandnps %ymm0, %ymm12, %ymm0
+; AVX1-NEXT:    vandps %ymm6, %ymm12, %ymm6
+; AVX1-NEXT:    vorps %ymm0, %ymm6, %ymm0
+; AVX1-NEXT:    vpsrlq $48, %xmm8, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm0[0,1,2,3],xmm6[4],xmm0[5,6,7]
+; AVX1-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1],xmm0[2,3,4,5],xmm7[6],xmm0[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[1,0,2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2,3,4,5],xmm6[6],xmm0[7]
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[0,1,3,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,1,1]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
+; AVX1-NEXT:    vandnps %ymm3, %ymm12, %ymm3
+; AVX1-NEXT:    vandps %ymm2, %ymm12, %ymm2
+; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm10[0,1,0,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3,4,5],xmm4[6],xmm3[7]
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4,5,6],xmm4[7]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm4 = zero,zero,xmm12[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm13[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,7,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm6 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,6,7,4,5,6,7,8,9,4,5,10,11,6,7]
+; AVX1-NEXT:    vpshufb %xmm7, %xmm6, %xmm8
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm8, %ymm6
+; AVX1-NEXT:    vandnps %ymm4, %ymm12, %ymm4
+; AVX1-NEXT:    vandps %ymm6, %ymm12, %ymm6
+; AVX1-NEXT:    vorps %ymm4, %ymm6, %ymm4
+; AVX1-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm6
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1],xmm4[2,3,4,5],xmm0[6],xmm4[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm10[4],xmm4[5,6,7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,2,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm4, %ymm4
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm7 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX1-NEXT:    vandnps %ymm3, %ymm7, %ymm3
-; AVX1-NEXT:    vandps %ymm7, %ymm4, %ymm4
-; AVX1-NEXT:    vorps %ymm3, %ymm4, %ymm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2],xmm5[3,4,5,6],xmm4[7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm3[0,1,2],xmm6[3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3,4],xmm4[5],xmm3[6,7]
-; AVX1-NEXT:    vmovdqa %xmm3, 48(%r9)
-; AVX1-NEXT:    vmovdqa %xmm6, 32(%r9)
-; AVX1-NEXT:    vmovdqa %xmm0, 16(%r9)
-; AVX1-NEXT:    vmovdqa %xmm1, (%r9)
-; AVX1-NEXT:    vmovdqa %xmm2, 112(%r9)
-; AVX1-NEXT:    vmovdqa %xmm11, 96(%r9)
-; AVX1-NEXT:    vmovdqa %xmm9, 80(%r9)
-; AVX1-NEXT:    vmovdqa %xmm5, 64(%r9)
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
+; AVX1-NEXT:    vpshufb %xmm7, %xmm4, %xmm5
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,6]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT:    vandnps %ymm1, %ymm12, %ymm1
+; AVX1-NEXT:    vandps %ymm4, %ymm12, %ymm4
+; AVX1-NEXT:    vorps %ymm1, %ymm4, %ymm1
+; AVX1-NEXT:    vpsrld $16, %xmm9, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0,1,2,3],xmm4[4],xmm1[5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm9[0,1,2,3,7,6,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3,4,5],xmm5[6],xmm1[7]
+; AVX1-NEXT:    vmovdqa %xmm1, 144(%r9)
+; AVX1-NEXT:    vmovdqa %xmm4, 128(%r9)
+; AVX1-NEXT:    vmovdqa %xmm0, 304(%r9)
+; AVX1-NEXT:    vmovdqa %xmm6, 288(%r9)
+; AVX1-NEXT:    vmovdqa %xmm3, 16(%r9)
+; AVX1-NEXT:    vmovdqa %xmm2, (%r9)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vmovaps %xmm0, 80(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 144(%r9)
+; AVX1-NEXT:    vmovaps %xmm0, 64(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 128(%r9)
+; AVX1-NEXT:    vmovaps %xmm0, 112(%r9)
+; AVX1-NEXT:    vmovdqa %xmm15, 96(%r9)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vmovaps %xmm0, 48(%r9)
+; AVX1-NEXT:    vmovdqa %xmm14, 32(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-NEXT:    vmovaps %xmm0, 176(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-NEXT:    vmovaps %xmm0, 160(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 240(%r9)
+; AVX1-NEXT:    vmovaps %xmm0, 272(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 224(%r9)
+; AVX1-NEXT:    vmovaps %xmm0, 256(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-NEXT:    vmovaps %xmm0, 208(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-NEXT:    vmovaps %xmm0, 192(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 304(%r9)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 288(%r9)
-; AVX1-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 272(%r9)
+; AVX1-NEXT:    vmovaps %xmm0, 240(%r9)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 256(%r9)
-; AVX1-NEXT:    addq $72, %rsp
+; AVX1-NEXT:    vmovaps %xmm0, 224(%r9)
+; AVX1-NEXT:    addq $152, %rsp
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: vf32:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    subq $40, %rsp
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm3
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm13
-; AVX2-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm7
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm11
+; AVX2-SLOW-NEXT:    subq $72, %rsp
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm12
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm2
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm14
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm11
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm9
+; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm10
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm4
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm6
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm5
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm12
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm0
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm14, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm13
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm7
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,6]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm3[0,1,0,1]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm5, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13>
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm7, %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm9[1,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm5[0],xmm6[1],xmm5[2],xmm6[3],xmm5[4,5],xmm6[6],xmm5[7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = mem[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[3,3,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3],xmm1[4],xmm5[5,6],xmm1[7]
+; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm5
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm6, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm6
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3]
+; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %ymm9
+; AVX2-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm0
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm14, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpbroadcastq (%r8), %ymm1
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm4, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
-; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm6, %xmm0
-; AVX2-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm1
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm13, %xmm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm11[1,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm11, %xmm6
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm10[1,2,2,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2],xmm1[3],xmm6[4,5],xmm1[6],xmm6[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,0]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = <255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm14, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm13[0,1,1,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = mem[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm10[3,3,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3],xmm7[4],xmm4[5,6],xmm7[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm4, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm2[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm3[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,2,6,7,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm10
-; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm1
-; AVX2-SLOW-NEXT:    vpbroadcastq 40(%rdi), %xmm5
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3],xmm5[4],xmm1[5],xmm5[6],xmm1[7]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm6
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm15
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[1,2,2,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0],xmm5[1],xmm4[2],xmm5[3],xmm4[4,5],xmm5[6],xmm4[7]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %ymm4
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,2]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm14, %ymm1, %ymm5, %ymm1
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm6[3,2,3,3,7,6,7,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm15[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2],ymm5[3,4],ymm7[5,6,7,8],ymm5[9],ymm7[10],ymm5[11,12],ymm7[13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,3,2]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm0, %ymm5, %ymm0
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[0,1,1,1]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm12, %ymm1, %ymm5, %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa %ymm8, %ymm1
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm8[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm10[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[2,3,2,2,6,7,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm9[0],ymm5[1],ymm9[2],ymm5[3],ymm9[4,5],ymm5[6],ymm9[7,8],ymm5[9],ymm9[10],ymm5[11],ymm9[12,13],ymm5[14],ymm9[15]
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm12
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm12[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm14
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm14[3,2,3,3,7,6,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm11[1],ymm9[2],ymm11[3,4],ymm9[5,6,7,8],ymm11[9],ymm9[10],ymm11[11,12],ymm9[13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,2]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,3,2]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm5, %ymm9, %ymm7
-; AVX2-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm5
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm0, %ymm5, %ymm5
-; AVX2-SLOW-NEXT:    vpbroadcastq 24(%r8), %ymm0
-; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm7, %ymm0, %ymm7
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
-; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm15, %ymm9
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm6[1,1,1,2,5,5,5,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm11[1],ymm9[2,3],ymm11[4],ymm9[5],ymm11[6],ymm9[7,8],ymm11[9],ymm9[10,11],ymm11[12],ymm9[13],ymm11[14],ymm9[15]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm3[3,1,2,2,4,5,6,7,11,9,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm2[0,1,2,1,4,5,6,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm13[2],ymm11[3],ymm13[4],ymm11[5,6],ymm13[7],ymm11[8,9],ymm13[10],ymm11[11],ymm13[12],ymm11[13,14],ymm13[15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm9, %ymm11, %ymm9
-; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm12, %ymm0
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm14[1,1,1,2,5,5,5,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm11[1],ymm0[2,3],ymm11[4],ymm0[5],ymm11[6],ymm0[7,8],ymm11[9],ymm0[10,11],ymm11[12],ymm0[13],ymm11[14],ymm0[15]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm10[3,1,2,2,4,5,6,7,11,9,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,1,2,1,4,5,6,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm11[0,1],ymm8[2],ymm11[3],ymm8[4],ymm11[5,6],ymm8[7],ymm11[8,9],ymm8[10],ymm11[11],ymm8[12],ymm11[13,14],ymm8[15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm12[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4,5],ymm0[6],ymm4[7,8],ymm0[9],ymm4[10],ymm0[11],ymm4[12,13],ymm0[14],ymm4[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[3,2,3,3,7,6,7,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm2[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,2,3,2,4,6,7,6]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm7[2],ymm4[3],ymm7[4],ymm4[5,6],ymm7[7],ymm4[8,9],ymm7[10],ymm4[11],ymm7[12],ymm4[13,14],ymm7[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm4, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,u,u,24,25,20,21,u,u,24,25>
+; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm4
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm7[1],ymm4[2],ymm7[3],ymm4[4,5],ymm7[6],ymm4[7,8],ymm7[9],ymm4[10],ymm7[11],ymm4[12,13],ymm7[14],ymm4[15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm12[0,1,2,1,4,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm2[3,1,2,2,4,5,6,7,11,9,10,10,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm7[2],ymm13[3],ymm7[4],ymm13[5,6],ymm7[7],ymm13[8,9],ymm7[10],ymm13[11],ymm7[12],ymm13[13,14],ymm7[15]
+; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm7
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm4, %ymm13, %ymm4
+; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm7[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm0[0],ymm13[1],ymm0[2],ymm13[3],ymm0[4,5],ymm13[6],ymm0[7,8],ymm13[9],ymm0[10],ymm13[11],ymm0[12,13],ymm13[14],ymm0[15]
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm13
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm13[3,1,2,2,4,5,6,7,11,9,10,10,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm1[0,1,2,1,4,5,6,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm8[2],ymm0[3],ymm8[4],ymm0[5,6],ymm8[7],ymm0[8,9],ymm8[10],ymm0[11],ymm8[12],ymm0[13,14],ymm8[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm15[2,3,2,3]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm8, %ymm0, %ymm15
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm1[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm7[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm8[0],ymm0[1],ymm8[2],ymm0[3],ymm8[4,5],ymm0[6],ymm8[7,8],ymm0[9],ymm8[10],ymm0[11],ymm8[12,13],ymm0[14],ymm8[15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm13[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,2,3,2,4,6,7,6]
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm0[3,2,3,3,7,6,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0,1],ymm8[2],ymm10[3],ymm8[4],ymm10[5,6],ymm8[7],ymm10[8,9],ymm8[10],ymm10[11],ymm8[12],ymm10[13,14],ymm8[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm0, %ymm8, %ymm0
-; AVX2-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm8
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm11, %ymm9, %ymm8, %ymm8
-; AVX2-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm9
-; AVX2-SLOW-NEXT:    vpblendvb %ymm11, %ymm0, %ymm9, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX2-SLOW-NEXT:    vpshufb %ymm9, %ymm15, %ymm11
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[3,0,3,0,7,4,7,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm11[1],ymm6[2],ymm11[3],ymm6[4,5],ymm11[6],ymm6[7,8],ymm11[9],ymm6[10],ymm11[11],ymm6[12,13],ymm11[14],ymm6[15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm3, %ymm3
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13],ymm3[14],ymm2[15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm6, %ymm2, %ymm2
-; AVX2-SLOW-NEXT:    vpshufb %ymm9, %ymm12, %ymm6
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm14[3,0,3,0,7,4,7,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm9[0],ymm6[1],ymm9[2],ymm6[3],ymm9[4,5],ymm6[6],ymm9[7,8],ymm6[9],ymm9[10],ymm6[11],ymm9[12,13],ymm6[14],ymm9[15]
-; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm9
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm1[1,1,2,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5],ymm9[6],ymm10[7,8],ymm9[9],ymm10[10,11],ymm9[12],ymm10[13],ymm9[14],ymm10[15]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm6, %ymm9, %ymm3
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[1,1,2,2]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm2, %ymm4, %ymm2
-; AVX2-SLOW-NEXT:    vpermq $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm4 = mem[1,1,2,2]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm3, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 64(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 224(%r9)
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm2, %ymm8, %ymm2
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm6, %ymm10
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm14[3,0,3,0,7,4,7,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2],ymm10[3],ymm11[4,5],ymm10[6],ymm11[7,8],ymm10[9],ymm11[10],ymm10[11],ymm11[12,13],ymm10[14],ymm11[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm12[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm11[0],ymm9[1,2],ymm11[3],ymm9[4],ymm11[5],ymm9[6,7],ymm11[8],ymm9[9,10],ymm11[11],ymm9[12],ymm11[13],ymm9[14,15]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm10, %ymm9, %ymm9
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm5, %ymm8
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm0[3,0,3,0,7,4,7,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0],ymm8[1],ymm10[2],ymm8[3],ymm10[4,5],ymm8[6],ymm10[7,8],ymm8[9],ymm10[10],ymm8[11],ymm10[12,13],ymm8[14],ymm10[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm7[1,2],ymm1[3],ymm7[4],ymm1[5],ymm7[6,7],ymm1[8],ymm7[9,10],ymm1[11],ymm7[12],ymm1[13],ymm7[14,15]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm8, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpbroadcastq (%r8), %ymm3
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpbroadcastq 40(%rdi), %ymm8
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm10
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpbroadcastq 8(%rdi), %ymm11
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm14[1,2,2,3,5,6,6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm4, %ymm12, %ymm4
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,2,2,3,5,6,6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm15, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm2, %ymm5, %ymm2
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm5, %ymm12, %ymm12
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm9, %ymm12, %ymm9
+; AVX2-SLOW-NEXT:    vpshufb %ymm5, %ymm13, %ymm5
+; AVX2-SLOW-NEXT:    vpblendvb %ymm7, %ymm1, %ymm5, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 64(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm9, 224(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 128(%r9)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 96(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm7, 128(%r9)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 192(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm5, 288(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 256(%r9)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 32(%r9)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, (%r9)
-; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 160(%r9)
-; AVX2-SLOW-NEXT:    addq $40, %rsp
+; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 256(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm6, 288(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm11, 32(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm10, 160(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 192(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm3, (%r9)
+; AVX2-SLOW-NEXT:    addq $72, %rsp
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
-; AVX2-FAST-LABEL: vf32:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    subq $40, %rsp
-; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm14
-; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm12
-; AVX2-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
-; AVX2-FAST-NEXT:    vmovdqa (%r8), %ymm8
-; AVX2-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm4
-; AVX2-FAST-NEXT:    vmovdqa 32(%rsi), %xmm13
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm5
-; AVX2-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm6
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm5[0,1,0,1]
-; AVX2-FAST-NEXT:    vmovdqa (%rcx), %xmm5
-; AVX2-FAST-NEXT:    vmovdqa 32(%rcx), %xmm6
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm1
-; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX2-FAST-NEXT:    vmovdqa 32(%rdx), %xmm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[1,2,2,2]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,0]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm9, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm8[0,1,1,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm1, %ymm9, %ymm1
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm13, %xmm1
-; AVX2-FAST-NEXT:    vpbroadcastq 40(%rdi), %xmm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm3
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,2,2,2]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
-; AVX2-FAST-NEXT:    vmovdqa 32(%r8), %ymm3
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[0,1,1,1]
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpbroadcastq 32(%r8), %ymm1
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm13, %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX2-FAST-NEXT:    vmovdqa 32(%rdx), %ymm9
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vmovdqa 32(%rcx), %ymm10
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3]
-; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vpbroadcastq (%r8), %ymm2
-; AVX2-FAST-NEXT:    vpblendvb %ymm13, %ymm1, %ymm2, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29>
-; AVX2-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm2
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm12[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7,8],ymm4[9],ymm2[10],ymm4[11],ymm2[12,13],ymm4[14],ymm2[15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,2]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31>
-; AVX2-FAST-NEXT:    vpshufb %ymm5, %ymm10, %ymm4
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm9[3,2,3,3,7,6,7,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2],ymm6[3,4],ymm4[5,6,7,8],ymm6[9],ymm4[10],ymm6[11,12],ymm4[13,14,15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,2]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u>
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm2, %ymm4, %ymm2
-; AVX2-FAST-NEXT:    vmovdqa (%rsi), %ymm4
-; AVX2-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2],ymm7[3],ymm1[4,5],ymm7[6],ymm1[7,8],ymm7[9],ymm1[10],ymm7[11],ymm1[12,13],ymm7[14],ymm1[15]
-; AVX2-FAST-NEXT:    vmovdqa (%rcx), %ymm7
-; AVX2-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm5
-; AVX2-FAST-NEXT:    vmovdqa (%rdx), %ymm13
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm13[3,2,3,3,7,6,7,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm15[1],ymm5[2],ymm15[3,4],ymm5[5,6,7,8],ymm15[9],ymm5[10],ymm15[11,12],ymm5[13,14,15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,2]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,3,2]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm1, %ymm5, %ymm1
-; AVX2-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm5
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm2, %ymm5, %ymm5
-; AVX2-FAST-NEXT:    vpbroadcastq 24(%r8), %ymm2
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm6
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm9[1,1,1,2,5,5,5,6]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm15[1],ymm6[2,3],ymm15[4],ymm6[5],ymm15[6],ymm6[7,8],ymm15[9],ymm6[10,11],ymm15[12],ymm6[13],ymm15[14],ymm6[15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,18,19,u,u,20,21,u,u,24,25,24,25,u,u>
-; AVX2-FAST-NEXT:    vpshufb %ymm15, %ymm11, %ymm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm12[0,1,2,1,4,5,6,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm8[2],ymm0[3],ymm8[4],ymm0[5,6],ymm8[7],ymm0[8,9],ymm8[10],ymm0[11],ymm8[12],ymm0[13,14],ymm8[15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm6, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm7, %ymm2
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm13[1,1,1,2,5,5,5,6]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2,3],ymm6[4],ymm2[5],ymm6[6],ymm2[7,8],ymm6[9],ymm2[10,11],ymm6[12],ymm2[13],ymm6[14],ymm2[15]
-; AVX2-FAST-NEXT:    vpshufb %ymm15, %ymm4, %ymm6
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm14[0,1,2,1,4,5,6,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm15[2],ymm6[3],ymm15[4],ymm6[5,6],ymm15[7],ymm6[8,9],ymm15[10],ymm6[11],ymm15[12],ymm6[13,14],ymm15[15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm2, %ymm6, %ymm2
-; AVX2-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm6
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm0, %ymm6, %ymm0
-; AVX2-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm6
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm2, %ymm6, %ymm2
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm10, %ymm8
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[3,0,3,0,7,4,7,4]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7,8],ymm8[9],ymm9[10],ymm8[11],ymm9[12,13],ymm8[14],ymm9[15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm11, %ymm10
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm12[1,1,2,2]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2,3],ymm10[4],ymm11[5],ymm10[6],ymm11[7,8],ymm10[9],ymm11[10,11],ymm10[12],ymm11[13],ymm10[14],ymm11[15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255,255,255,u,u,0,0,0,0,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm8, %ymm10, %ymm8
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm6
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm13[3,0,3,0,7,4,7,4]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2],ymm6[3],ymm7[4,5],ymm6[6],ymm7[7,8],ymm6[9],ymm7[10],ymm6[11],ymm7[12,13],ymm6[14],ymm7[15]
-; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm14[1,1,2,2]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm7[0],ymm4[1],ymm7[2,3],ymm4[4],ymm7[5],ymm4[6],ymm7[7,8],ymm4[9],ymm7[10,11],ymm4[12],ymm7[13],ymm4[14],ymm7[15]
-; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm6, %ymm4, %ymm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[1,1,2,2]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm8, %ymm3, %ymm3
-; AVX2-FAST-NEXT:    vpermq $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm7 = mem[1,1,2,2]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm4, %ymm7, %ymm4
-; AVX2-FAST-NEXT:    vmovdqa %ymm4, 64(%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm3, 224(%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm2, 96(%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm1, 128(%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm5, 288(%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm0, 256(%r9)
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm0, (%r9)
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm0, 160(%r9)
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm0, 192(%r9)
-; AVX2-FAST-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm0, 32(%r9)
-; AVX2-FAST-NEXT:    addq $40, %rsp
-; AVX2-FAST-NEXT:    vzeroupper
-; AVX2-FAST-NEXT:    retq
+; AVX2-FAST-ALL-LABEL: vf32:
+; AVX2-FAST-ALL:       # %bb.0:
+; AVX2-FAST-ALL-NEXT:    subq $72, %rsp
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %ymm15
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm9
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %xmm0
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm11
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdx), %xmm5
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %xmm10
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rcx), %xmm1
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm8 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm3, %ymm4, %ymm2
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm13 = <6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13>
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm13, %xmm0, %xmm3
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[1,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,6,7,u,u,10,11,u,u,u,u,8,9>
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm7, %xmm1, %xmm3
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm6 = mem[2,1,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm3[2],xmm6[3],xmm3[4],xmm6[5,6],xmm3[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %ymm14
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm4, %ymm6, %ymm2
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %xmm4
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdx), %ymm4
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rcx), %ymm6
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r8), %ymm3
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm13, %xmm9, %xmm0
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm11[1,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm7, %xmm10, %xmm1
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm5 = mem[2,1,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3],xmm1[4],xmm5[5,6],xmm1[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm0 = ymm15[2,3,2,3,6,7,6,7]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[2,3,2,3,6,7,6,7]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,30,31,30,31,26,27,28,29>
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm5, %ymm14, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm7 = ymm4[3,2,3,3,7,6,7,7]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0,1],ymm1[2],ymm7[3],ymm1[4],ymm7[5,6],ymm1[7],ymm7[8,9],ymm1[10],ymm7[11],ymm1[12],ymm7[13,14],ymm1[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,u,u,24,25,20,21,u,u,24,25>
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm0, %ymm6, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm7 = ymm3[0,1,0,1,4,5,4,5]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2],ymm7[3],ymm1[4,5],ymm7[6],ymm1[7,8],ymm7[9],ymm1[10],ymm7[11],ymm1[12,13],ymm7[14],ymm1[15]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,18,19,u,u,20,21,u,u,24,25,24,25,u,u>
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm7, %ymm14, %ymm11
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm12 = ymm15[0,1,2,1,4,5,6,5]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3],ymm12[4],ymm11[5,6],ymm12[7],ymm11[8,9],ymm12[10],ymm11[11],ymm12[12],ymm11[13,14],ymm12[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm1, %ymm11, %ymm1
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %ymm11
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm0, %ymm11, %ymm1
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %ymm2
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm13 = ymm2[0,1,0,1,4,5,4,5]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm13[1],ymm1[2],ymm13[3],ymm1[4,5],ymm13[6],ymm1[7,8],ymm13[9],ymm1[10],ymm13[11],ymm1[12,13],ymm13[14],ymm1[15]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm13
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm7, %ymm13, %ymm7
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm9 = ymm0[0,1,2,1,4,5,6,5]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm9[2],ymm7[3],ymm9[4],ymm7[5,6],ymm9[7],ymm7[8,9],ymm9[10],ymm7[11],ymm9[12],ymm7[13,14],ymm9[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm1, %ymm7, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[2,3,2,3,6,7,6,7]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm9 = ymm2[2,3,2,3,6,7,6,7]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm7 = ymm9[0],ymm7[1],ymm9[2],ymm7[3],ymm9[4,5],ymm7[6],ymm9[7,8],ymm7[9],ymm9[10],ymm7[11],ymm9[12,13],ymm7[14],ymm9[15]
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm5, %ymm13, %ymm5
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm9
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm10 = ymm9[3,2,3,3,7,6,7,7]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm5 = ymm10[0,1],ymm5[2],ymm10[3],ymm5[4],ymm10[5,6],ymm5[7],ymm10[8,9],ymm5[10],ymm10[11],ymm5[12],ymm10[13,14],ymm5[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm7, %ymm5, %ymm5
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm7, %ymm6, %ymm10
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm12 = ymm4[3,0,3,0,7,4,7,4]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8],ymm10[9],ymm12[10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm12 = ymm15[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm3 = ymm12[0],ymm3[1,2],ymm12[3],ymm3[4],ymm12[5],ymm3[6,7],ymm12[8],ymm3[9,10],ymm12[11],ymm3[12],ymm12[13],ymm3[14,15]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm10, %ymm3, %ymm3
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm7, %ymm11, %ymm7
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm10 = ymm9[3,0,3,0,7,4,7,4]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm7 = ymm10[0],ymm7[1],ymm10[2],ymm7[3],ymm10[4,5],ymm7[6],ymm10[7,8],ymm7[9],ymm10[10],ymm7[11],ymm10[12,13],ymm7[14],ymm10[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2],ymm0[3],ymm2[4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10],ymm0[11],ymm2[12],ymm0[13],ymm2[14,15]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm7, %ymm0, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq (%r8), %ymm2
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq 40(%rdi), %ymm8
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq 32(%r8), %ymm10
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq 8(%rdi), %ymm12
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq {{.*#+}} ymm15 = [25769803781,25769803781,25769803781,25769803781]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm4, %ymm15, %ymm4
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm9, %ymm15, %ymm9
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm1, %ymm9, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm11[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm9, %ymm5
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm9 = [0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31]
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm9, %ymm14, %ymm11
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm3, %ymm11, %ymm3
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm9, %ymm13, %ymm9
+; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm7, %ymm0, %ymm9, %ymm0
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, 64(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm3, 224(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm5, 128(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm1, 96(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm4, 256(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm6, 288(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm12, 32(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm10, 160(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm8, 192(%r9)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm2, (%r9)
+; AVX2-FAST-ALL-NEXT:    addq $72, %rsp
+; AVX2-FAST-ALL-NEXT:    vzeroupper
+; AVX2-FAST-ALL-NEXT:    retq
+;
+; AVX2-FAST-PERLANE-LABEL: vf32:
+; AVX2-FAST-PERLANE:       # %bb.0:
+; AVX2-FAST-PERLANE-NEXT:    subq $72, %rsp
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %ymm15
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm9
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255>
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm1, %ymm4, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm0, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2],xmm7[3],xmm4[4,5],xmm7[6],xmm4[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,6,7,u,u,10,11,u,u,u,u,8,9>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm2, %xmm7
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm6 = mem[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4],xmm6[5,6],xmm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm4, %ymm6, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm4, (%rsp) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm9, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm12[1,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm11, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = mem[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm0 = ymm15[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm8[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,30,31,30,31,26,27,28,29>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm5, %ymm10, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[3,2,3,3,7,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3],ymm1[4],ymm2[5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11],ymm1[12],ymm2[13,14],ymm1[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,u,u,24,25,20,21,u,u,24,25>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm6, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm8[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,18,19,u,u,20,21,u,u,24,25,24,25,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm10, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm12 = ymm15[0,1,2,1,4,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3],ymm12[4],ymm11[5,6],ymm12[7],ymm11[8,9],ymm12[10],ymm11[11],ymm12[12],ymm11[13,14],ymm12[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm1, %ymm11, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm11, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm13 = ymm2[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0],ymm13[1],ymm0[2],ymm13[3],ymm0[4,5],ymm13[6],ymm0[7,8],ymm13[9],ymm0[10],ymm13[11],ymm0[12,13],ymm13[14],ymm0[15]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm13
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm13, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm14 = ymm0[0,1,2,1,4,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm14[2],ymm4[3],ymm14[4],ymm4[5,6],ymm14[7],ymm4[8,9],ymm14[10],ymm4[11],ymm14[12],ymm4[13,14],ymm14[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm1, %ymm4, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm4 = ymm0[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm14 = ymm2[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm4 = ymm14[0],ymm4[1],ymm14[2],ymm4[3],ymm14[4,5],ymm4[6],ymm14[7,8],ymm4[9],ymm14[10],ymm4[11],ymm14[12,13],ymm4[14],ymm14[15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm5, %ymm13, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm9 = ymm14[3,2,3,3,7,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm5 = ymm9[0,1],ymm5[2],ymm9[3],ymm5[4],ymm9[5,6],ymm5[7],ymm9[8,9],ymm5[10],ymm9[11],ymm5[12],ymm9[13,14],ymm5[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm4, %ymm5, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm5, %ymm6, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[3,0,3,0,7,4,7,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm9 = ymm12[0],ymm9[1],ymm12[2],ymm9[3],ymm12[4,5],ymm9[6],ymm12[7,8],ymm9[9],ymm12[10],ymm9[11],ymm12[12,13],ymm9[14],ymm12[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm15[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm8 = ymm12[0],ymm8[1,2],ymm12[3],ymm8[4],ymm12[5],ymm8[6,7],ymm12[8],ymm8[9,10],ymm12[11],ymm8[12],ymm12[13],ymm8[14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm9, %ymm8, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm5, %ymm11, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm9 = ymm14[3,0,3,0,7,4,7,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm5 = ymm9[0],ymm5[1],ymm9[2],ymm5[3],ymm9[4,5],ymm5[6],ymm9[7,8],ymm5[9],ymm9[10],ymm5[11],ymm9[12,13],ymm5[14],ymm9[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2],ymm0[3],ymm2[4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10],ymm0[11],ymm2[12],ymm0[13],ymm2[14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm5, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq (%r8), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq 40(%rdi), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, (%rsp), %ymm5, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq 32(%r8), %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq 8(%rdi), %ymm12
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[1,2,2,3,5,6,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[1,2,2,3,5,6,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm1, %ymm14, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm11[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm4, %ymm11, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm11, %ymm10, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm8, %ymm10, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm11, %ymm13, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm0, %ymm10, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 64(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm8, 224(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, 128(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 96(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm7, 256(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm6, 288(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm12, 32(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm9, 160(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm5, 192(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%r9)
+; AVX2-FAST-PERLANE-NEXT:    addq $72, %rsp
+; AVX2-FAST-PERLANE-NEXT:    vzeroupper
+; AVX2-FAST-PERLANE-NEXT:    retq
 ;
 ; AVX512-LABEL: vf32:
 ; AVX512:       # %bb.0:
@@ -2101,38 +2462,34 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
 ; AVX512-NEXT:    vmovdqu16 %zmm5, %zmm6 {%k1}
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,32,5,6,7,8,33,10,11,12,13,34,15,16,17,18,35,20,21,22,23,36,25,26,27,28,37,30,31]
 ; AVX512-NEXT:    vpermi2w %zmm4, %zmm6, %zmm5
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,u,u,7,39,u,u,u,8,40,u,u,u,9,41,u,u,u,10,42,u,u,u,11,43,u,u,u,12,44,u,u>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm0, %zmm6
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,u,6,39,u,u,u,7,40,u,u,u,8,41,u,u,u,9,42,u,u,u,10,43,u,u,u,11,44,u,u,u>
+; AVX512-NEXT:    vpermi2w %zmm0, %zmm4, %zmm6
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <6,38,u,u,u,7,39,u,u,u,8,40,u,u,u,9,41,u,u,u,10,42,u,u,u,11,43,u,u,u,12,44>
 ; AVX512-NEXT:    vpermi2w %zmm3, %zmm2, %zmm7
-; AVX512-NEXT:    movl $831283992, %eax # imm = 0x318C6318
-; AVX512-NEXT:    kmovd %eax, %k2
-; AVX512-NEXT:    vmovdqu16 %zmm6, %zmm7 {%k2}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,38,3,4,5,6,39,8,9,10,11,40,13,14,15,16,41,18,19,20,21,42,23,24,25,26,43,28,29,30,31]
-; AVX512-NEXT:    vpermi2w %zmm4, %zmm7, %zmm6
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,u,13,45,u,u,u,14,46,u,u,u,15,47,u,u,u,16,48,u,u,u,17,49,u,u,u,18,50,u,u>
-; AVX512-NEXT:    vpermi2w %zmm3, %zmm2, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <u,13,45,u,u,u,14,46,u,u,u,15,47,u,u,u,16,48,u,u,u,17,49,u,u,u,18,50,u,u,u,19>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm0, %zmm8
-; AVX512-NEXT:    vmovdqu16 %zmm7, %zmm8 {%k2}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [44,1,2,3,4,45,6,7,8,9,46,11,12,13,14,47,16,17,18,19,48,21,22,23,24,49,26,27,28,29,50,31]
-; AVX512-NEXT:    vpermi2w %zmm4, %zmm8, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <u,19,51,u,u,u,20,52,u,u,u,21,53,u,u,u,22,54,u,u,u,23,55,u,u,u,24,56,u,u,u,25>
-; AVX512-NEXT:    vpermi2w %zmm3, %zmm2, %zmm8
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <19,u,u,u,52,20,u,u,u,53,21,u,u,u,54,22,u,u,u,55,23,u,u,u,56,24,u,u,u,57,25,u>
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm1, %zmm9
-; AVX512-NEXT:    movl $-1939662650, %eax # imm = 0x8C6318C6
-; AVX512-NEXT:    kmovd %eax, %k2
-; AVX512-NEXT:    vmovdqu16 %zmm8, %zmm9 {%k2}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,2,51,4,5,6,7,52,9,10,11,12,53,14,15,16,17,54,19,20,21,22,55,24,25,26,27,56,29,30,31]
-; AVX512-NEXT:    vpermi2w %zmm4, %zmm9, %zmm8
+; AVX512-NEXT:    vmovdqu16 %zmm6, %zmm7 {%k1}
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,39,5,6,7,8,40,10,11,12,13,41,15,16,17,18,42,20,21,22,23,43,25,26,27,28,44,30,31]
+; AVX512-NEXT:    vpermi2w %zmm1, %zmm7, %zmm6
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,13,45,u,u,u,14,46,u,u,u,15,47,u,u,u,16,48,u,u,u,17,49,u,u,u,18,50,u,u,u>
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm7
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <12,45,u,u,u,13,46,u,u,u,14,47,u,u,u,15,48,u,u,u,16,49,u,u,u,17,50,u,u,u,18,51>
+; AVX512-NEXT:    vpermi2w %zmm0, %zmm4, %zmm8
+; AVX512-NEXT:    vmovdqu16 %zmm7, %zmm8 {%k1}
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,45,5,6,7,8,46,10,11,12,13,47,15,16,17,18,48,20,21,22,23,49,25,26,27,28,50,30,31]
+; AVX512-NEXT:    vpermi2w %zmm3, %zmm8, %zmm7
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <u,u,19,51,u,u,u,20,52,u,u,u,21,53,u,u,u,22,54,u,u,u,23,55,u,u,u,24,56,u,u,u>
+; AVX512-NEXT:    vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <19,51,u,u,u,20,52,u,u,u,21,53,u,u,u,22,54,u,u,u,23,55,u,u,u,24,56,u,u,u,25,57>
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm9
+; AVX512-NEXT:    vmovdqu16 %zmm8, %zmm9 {%k1}
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,52,5,6,7,8,53,10,11,12,13,54,15,16,17,18,55,20,21,22,23,56,25,26,27,28,57,30,31]
+; AVX512-NEXT:    vpermi2w %zmm0, %zmm9, %zmm8
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <u,u,26,58,u,u,u,27,59,u,u,u,28,60,u,u,u,29,61,u,u,u,30,62,u,u,u,31,63,u,u,u>
 ; AVX512-NEXT:    vpermi2w %zmm1, %zmm0, %zmm9
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <25,u,u,u,58,26,u,u,u,59,27,u,u,u,60,28,u,u,u,61,29,u,u,u,62,30,u,u,u,63,31,u>
-; AVX512-NEXT:    vpermi2w %zmm2, %zmm3, %zmm0
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <25,57,u,u,u,26,58,u,u,u,27,59,u,u,u,28,60,u,u,u,29,61,u,u,u,30,62,u,u,u,31,63>
+; AVX512-NEXT:    vpermi2w %zmm4, %zmm3, %zmm0
 ; AVX512-NEXT:    vmovdqu16 %zmm9, %zmm0 {%k1}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,57,2,3,4,5,58,7,8,9,10,59,12,13,14,15,60,17,18,19,20,61,22,23,24,25,62,27,28,29,30,63]
-; AVX512-NEXT:    vpermi2w %zmm4, %zmm0, %zmm1
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,58,5,6,7,8,59,10,11,12,13,60,15,16,17,18,61,20,21,22,23,62,25,26,27,28,63,30,31]
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm0, %zmm1
 ; AVX512-NEXT:    vmovdqu64 %zmm1, 256(%r9)
 ; AVX512-NEXT:    vmovdqu64 %zmm8, 192(%r9)
 ; AVX512-NEXT:    vmovdqu64 %zmm7, 128(%r9)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
index 52678aa25bbaf..30efc4dee308e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
@@ -18,23 +18,19 @@ define void @vf2(<2 x i16>* %in.vecptr0, <2 x i16>* %in.vecptr1, <2 x i16>* %in.
 ; SSE-NEXT:    movdqa (%r9), %xmm3
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,4,5,4,6]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm4[3,3]
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[2,0,3,3,4,5,6,7]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,0]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[1,2,0,3,4,5,6,7]
-; SSE-NEXT:    movaps %xmm0, (%rax)
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm4[1,3,2,3,4,5,6,7]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[1,3,1,3,4,5,6,7]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE-NEXT:    movq %xmm1, 16(%rax)
+; SSE-NEXT:    movaps %xmm0, (%rax)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf2:
@@ -153,28 +149,23 @@ define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.
 ; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
 ; SSE-NEXT:    movq {{.*#+}} xmm3 = mem[0],zero
-; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
-; SSE-NEXT:    movdqa %xmm1, %xmm5
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,1,2,0]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,4,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,1],xmm0[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm6[0,2]
-; SSE-NEXT:    movdqa %xmm0, %xmm6
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm1[1]
 ; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm1[3,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[3,1,1,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[0,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE-NEXT:    movaps %xmm2, 16(%rax)
-; SSE-NEXT:    movaps %xmm5, (%rax)
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[1,3]
+; SSE-NEXT:    movdqa %xmm1, %xmm4
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[0,2]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; SSE-NEXT:    movdqa %xmm2, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm2[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,2]
 ; SSE-NEXT:    movaps %xmm0, 32(%rax)
+; SSE-NEXT:    movaps %xmm5, 16(%rax)
+; SSE-NEXT:    movaps %xmm4, (%rax)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf4:
@@ -353,129 +344,99 @@ define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.
 ; SSE-LABEL: vf8:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    movdqa (%rdi), %xmm10
-; SSE-NEXT:    movdqa (%rsi), %xmm0
-; SSE-NEXT:    movdqa (%rdx), %xmm2
-; SSE-NEXT:    movdqa (%rcx), %xmm1
-; SSE-NEXT:    movdqa (%r8), %xmm3
-; SSE-NEXT:    movdqa (%r9), %xmm5
-; SSE-NEXT:    movdqa %xmm2, %xmm9
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3]
-; SSE-NEXT:    movdqa %xmm10, %xmm12
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; SSE-NEXT:    movdqa %xmm12, %xmm4
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,3],xmm9[3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm3[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,2],xmm6[0,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0,1,3]
-; SSE-NEXT:    movaps {{.*#+}} xmm11 = [65535,0,65535,65535,65535,65535,65535,0]
-; SSE-NEXT:    andps %xmm11, %xmm4
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm5[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
-; SSE-NEXT:    movaps %xmm11, %xmm8
-; SSE-NEXT:    andnps %xmm6, %xmm8
-; SSE-NEXT:    orps %xmm4, %xmm8
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
-; SSE-NEXT:    movdqa %xmm10, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm2[3,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,6,5,7,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE-NEXT:    andps %xmm11, %xmm0
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
-; SSE-NEXT:    andnps %xmm1, %xmm11
-; SSE-NEXT:    orps %xmm0, %xmm11
-; SSE-NEXT:    movdqa %xmm9, %xmm0
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm12[0]
+; SSE-NEXT:    movdqa (%rdi), %xmm3
+; SSE-NEXT:    movdqa (%rsi), %xmm8
+; SSE-NEXT:    movdqa (%rdx), %xmm0
+; SSE-NEXT:    movdqa (%rcx), %xmm9
+; SSE-NEXT:    movdqa (%r8), %xmm5
+; SSE-NEXT:    movdqa (%r9), %xmm10
 ; SSE-NEXT:    movdqa %xmm3, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm12[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,2]
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [65535,65535,65535,65535,65535,0,65535,65535]
-; SSE-NEXT:    andps %xmm4, %xmm0
-; SSE-NEXT:    movdqa %xmm5, %xmm6
-; SSE-NEXT:    pslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm6[0,1,2,3,4,5]
-; SSE-NEXT:    movaps %xmm4, %xmm1
-; SSE-NEXT:    andnps %xmm6, %xmm1
-; SSE-NEXT:    orps %xmm0, %xmm1
-; SSE-NEXT:    movdqa %xmm10, %xmm0
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
-; SSE-NEXT:    movdqa %xmm3, %xmm7
-; SSE-NEXT:    psrldq {{.*#+}} xmm7 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1],xmm2[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm0[0,2]
-; SSE-NEXT:    movaps {{.*#+}} xmm6 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT:    andps %xmm6, %xmm7
-; SSE-NEXT:    pshufd {{.*#+}} xmm13 = xmm5[2,2,3,3]
-; SSE-NEXT:    movaps %xmm6, %xmm0
-; SSE-NEXT:    andnps %xmm13, %xmm0
-; SSE-NEXT:    orps %xmm7, %xmm0
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm10[0]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm3[1,1,1,1,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,1],xmm10[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[0,2]
-; SSE-NEXT:    andps %xmm4, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[0,0,1,1]
-; SSE-NEXT:    pslld $16, %xmm5
-; SSE-NEXT:    andnps %xmm5, %xmm4
-; SSE-NEXT:    orps %xmm2, %xmm4
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm9[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1],xmm9[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm12[0,2]
-; SSE-NEXT:    andps %xmm6, %xmm7
-; SSE-NEXT:    andnps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm7, %xmm6
-; SSE-NEXT:    movaps %xmm6, 16(%rax)
-; SSE-NEXT:    movaps %xmm4, 48(%rax)
-; SSE-NEXT:    movaps %xmm0, 64(%rax)
-; SSE-NEXT:    movaps %xmm1, (%rax)
-; SSE-NEXT:    movaps %xmm11, 80(%rax)
-; SSE-NEXT:    movaps %xmm8, 32(%rax)
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
+; SSE-NEXT:    movdqa %xmm5, %xmm2
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3]
+; SSE-NEXT:    movdqa %xmm2, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[1,3]
+; SSE-NEXT:    movdqa %xmm0, %xmm13
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3]
+; SSE-NEXT:    movdqa %xmm13, %xmm11
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm11 = xmm11[0],xmm1[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[2,0],xmm7[0,2]
+; SSE-NEXT:    movdqa %xmm13, %xmm6
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,3],xmm2[3,3]
+; SSE-NEXT:    movaps %xmm1, %xmm12
+; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[3,1],xmm2[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[2,0],xmm6[0,2]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; SSE-NEXT:    movdqa %xmm3, %xmm6
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm0[1]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
+; SSE-NEXT:    movdqa %xmm5, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1],xmm0[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm6[0,2]
+; SSE-NEXT:    movaps %xmm0, %xmm6
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,3],xmm5[3,3]
+; SSE-NEXT:    movdqa %xmm3, %xmm4
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,1],xmm5[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm6[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,1],xmm3[1,3]
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm5[0,2]
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm13[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm13[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[0,2]
+; SSE-NEXT:    movaps %xmm2, 16(%rax)
+; SSE-NEXT:    movaps %xmm0, 48(%rax)
+; SSE-NEXT:    movaps %xmm4, 80(%rax)
+; SSE-NEXT:    movaps %xmm7, 64(%rax)
+; SSE-NEXT:    movaps %xmm12, 32(%rax)
+; SSE-NEXT:    movaps %xmm11, (%rax)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm8
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm9
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm2
-; AVX1-NEXT:    vmovdqa (%rcx), %xmm3
-; AVX1-NEXT:    vmovdqa (%r8), %xmm11
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm9
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm8
+; AVX1-NEXT:    vmovdqa (%rcx), %xmm11
+; AVX1-NEXT:    vmovdqa (%r8), %xmm4
 ; AVX1-NEXT:    vmovdqa (%r9), %xmm5
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm7[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5],xmm0[6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm7[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3],xmm4[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,1,0,1]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm10 = xmm2[0,1],xmm0[2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[0,1,0,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,0,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm2, %ymm10
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,1,0,1]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[0,1,0,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[0,0,1,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm7[2,2,3,3]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5],xmm0[6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm4[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5,6,7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4,5],xmm0[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[2,2,3,3]
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm5 = xmm6[1],xmm5[1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3,4,5],xmm1[6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm4[1],xmm1[1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[2,3,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3,4,5],xmm5[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[2,2,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3],xmm4[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT:    vmovaps %ymm1, 64(%rax)
 ; AVX1-NEXT:    vmovaps %ymm0, 32(%rax)
 ; AVX1-NEXT:    vmovaps %ymm10, (%rax)
@@ -488,33 +449,33 @@ define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm2
-; AVX2-SLOW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-SLOW-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
+; AVX2-SLOW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 64(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 32(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm3, (%rax)
@@ -527,41 +488,43 @@ define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.
 ; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %xmm2
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-FAST-ALL-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,4,1,5,0,4,1,5]
 ; AVX2-FAST-ALL-NEXT:    # ymm3 = mem[0,1,0,1]
 ; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm3, %ymm3
 ; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,4,5,u,u,u,u,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u,24,25,28,29]
+; AVX2-FAST-ALL-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
 ; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
 ; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
+; AVX2-FAST-ALL-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,4,4,0,0,4,4,0]
 ; AVX2-FAST-ALL-NEXT:    # ymm4 = mem[0,1,0,1]
 ; AVX2-FAST-ALL-NEXT:    vpermd %ymm2, %ymm4, %ymm4
 ; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,12,13,8,9,u,u,u,u,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [2,6,1,5,2,6,1,5]
-; AVX2-FAST-ALL-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm4, %ymm4
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,10,11,14,15,u,u,u,u,u,u,u,u,16,17,20,21,u,u,u,u,u,u,u,u]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
 ; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
 ; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [7,3,3,7,7,3,3,7]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [2,6,1,5,2,6,1,5]
 ; AVX2-FAST-ALL-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm5, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm5, %ymm5
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,10,11,14,15,u,u,u,u,u,u,u,u,16,17,20,21,u,u,u,u,u,u,u,u]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3]
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
+; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [2,6,3,7,2,6,3,7]
+; AVX2-FAST-ALL-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm5, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[2,3,6,7,u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,u,u,u,u,26,27,30,31,u,u,u,u]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [7,3,3,7,7,3,3,7]
+; AVX2-FAST-ALL-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm2, %ymm0
 ; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,u,u,u,u,22,23,18,19,u,u,u,u,u,u,u,u]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
 ; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, 64(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm4, 32(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm3, (%rax)
@@ -574,33 +537,33 @@ define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[0,2,1,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[0,2,1,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[0,2,1,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 64(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, 32(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm3, (%rax)
@@ -648,437 +611,301 @@ define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.
 define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>* %in.vecptr2, <16 x i16>* %in.vecptr3, <16 x i16>* %in.vecptr4, <16 x i16>* %in.vecptr5, <96 x i16>* %out.vec) nounwind {
 ; SSE-LABEL: vf16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm11
-; SSE-NEXT:    movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 16(%rdi), %xmm14
-; SSE-NEXT:    movdqa (%rsi), %xmm15
-; SSE-NEXT:    movdqa 16(%rsi), %xmm4
-; SSE-NEXT:    movdqa (%rdx), %xmm8
-; SSE-NEXT:    movdqa 16(%rdx), %xmm1
-; SSE-NEXT:    movdqa (%rcx), %xmm10
-; SSE-NEXT:    movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 16(%rcx), %xmm6
-; SSE-NEXT:    movdqa 16(%r8), %xmm3
-; SSE-NEXT:    movdqa 16(%r9), %xmm13
-; SSE-NEXT:    movdqa %xmm1, %xmm7
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE-NEXT:    movdqa %xmm14, %xmm9
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE-NEXT:    movdqa %xmm9, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm7[3,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,6,5,7,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,2],xmm2[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE-NEXT:    movaps {{.*#+}} xmm12 = [65535,0,65535,65535,65535,65535,65535,0]
-; SSE-NEXT:    andps %xmm12, %xmm0
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm13[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
-; SSE-NEXT:    movaps %xmm12, %xmm2
-; SSE-NEXT:    andnps %xmm5, %xmm2
-; SSE-NEXT:    orps %xmm0, %xmm2
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm4[0],xmm14[1],xmm4[1],xmm14[2],xmm4[2],xmm14[3],xmm4[3]
-; SSE-NEXT:    movdqa %xmm14, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm1[3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm3[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,2],xmm5[0,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE-NEXT:    andps %xmm12, %xmm0
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm13[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
-; SSE-NEXT:    movaps %xmm12, %xmm2
-; SSE-NEXT:    andnps %xmm5, %xmm2
-; SSE-NEXT:    orps %xmm0, %xmm2
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm8, %xmm4
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
-; SSE-NEXT:    movdqa %xmm11, %xmm2
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
-; SSE-NEXT:    movdqa %xmm15, %xmm10
-; SSE-NEXT:    movdqa %xmm2, %xmm6
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,3],xmm4[3,3]
-; SSE-NEXT:    movdqa (%r8), %xmm11
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm11[0,1,2,3,6,5,7,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,2],xmm5[2,3]
-; SSE-NEXT:    movdqa (%r9), %xmm15
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm15[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
-; SSE-NEXT:    movaps %xmm12, %xmm0
-; SSE-NEXT:    andnps %xmm5, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0,1,3]
-; SSE-NEXT:    andps %xmm12, %xmm6
-; SSE-NEXT:    orps %xmm6, %xmm0
-; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm8 = xmm8[0],mem[0],xmm8[1],mem[1],xmm8[2],mem[2],xmm8[3],mem[3]
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3]
-; SSE-NEXT:    movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,3],xmm8[3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm11[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,2],xmm6[0,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0,1,3]
-; SSE-NEXT:    andps %xmm12, %xmm5
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm15[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
-; SSE-NEXT:    andnps %xmm6, %xmm12
-; SSE-NEXT:    orps %xmm5, %xmm12
-; SSE-NEXT:    movdqa %xmm9, %xmm6
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm7[1]
-; SSE-NEXT:    movdqa %xmm3, %xmm5
-; SSE-NEXT:    psrldq {{.*#+}} xmm5 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm7[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm6[0,2]
-; SSE-NEXT:    movdqa {{.*#+}} xmm6 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT:    pshufd {{.*#+}} xmm10 = xmm13[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm6, %xmm0
-; SSE-NEXT:    pandn %xmm10, %xmm0
-; SSE-NEXT:    andps %xmm6, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    movdqa (%rdi), %xmm13
+; SSE-NEXT:    movdqa 16(%rdi), %xmm7
+; SSE-NEXT:    movdqa (%rsi), %xmm9
+; SSE-NEXT:    movdqa 16(%rsi), %xmm14
+; SSE-NEXT:    movdqa (%rdx), %xmm2
+; SSE-NEXT:    movdqa 16(%rdx), %xmm3
+; SSE-NEXT:    movdqa (%rcx), %xmm0
 ; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm9[0]
-; SSE-NEXT:    movdqa %xmm3, %xmm5
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,1],xmm9[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[0,2]
-; SSE-NEXT:    movdqa %xmm13, %xmm5
-; SSE-NEXT:    pslld $16, %xmm5
-; SSE-NEXT:    movdqa {{.*#+}} xmm9 = [65535,65535,65535,65535,65535,0,65535,65535]
-; SSE-NEXT:    movdqa %xmm9, %xmm10
-; SSE-NEXT:    pandn %xmm5, %xmm10
-; SSE-NEXT:    andps %xmm9, %xmm7
-; SSE-NEXT:    por %xmm7, %xmm10
-; SSE-NEXT:    movdqa %xmm14, %xmm5
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm1[1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm3[1,1,1,1,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm5[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm13[0,0,1,1]
-; SSE-NEXT:    movdqa %xmm6, %xmm7
-; SSE-NEXT:    pandn %xmm5, %xmm7
-; SSE-NEXT:    andps %xmm6, %xmm0
-; SSE-NEXT:    por %xmm0, %xmm7
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm14[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm14[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[0,2]
-; SSE-NEXT:    pslldq {{.*#+}} xmm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm13[0,1,2,3,4,5]
-; SSE-NEXT:    movdqa %xmm9, %xmm3
-; SSE-NEXT:    pandn %xmm13, %xmm3
-; SSE-NEXT:    andps %xmm9, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm3
-; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    movdqa 16(%rcx), %xmm15
+; SSE-NEXT:    movdqa 16(%r8), %xmm1
+; SSE-NEXT:    movdqa 16(%r9), %xmm5
+; SSE-NEXT:    movdqa %xmm1, %xmm4
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE-NEXT:    movdqa %xmm3, %xmm8
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm15[4],xmm8[5],xmm15[5],xmm8[6],xmm15[6],xmm8[7],xmm15[7]
+; SSE-NEXT:    movdqa %xmm8, %xmm6
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,3],xmm4[3,3]
+; SSE-NEXT:    movdqa %xmm7, %xmm12
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
+; SSE-NEXT:    movdqa %xmm12, %xmm10
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[3,1],xmm4[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[2,0],xmm6[0,2]
+; SSE-NEXT:    movdqa %xmm12, %xmm6
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm8[1]
+; SSE-NEXT:    movaps %xmm4, %xmm11
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[1,1],xmm8[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[2,0],xmm6[0,2]
+; SSE-NEXT:    movdqa (%r8), %xmm6
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm12[1,3]
+; SSE-NEXT:    movlhps {{.*#+}} xmm8 = xmm8[0],xmm12[0]
+; SSE-NEXT:    movdqa (%r9), %xmm12
+; SSE-NEXT:    movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[0,2]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
+; SSE-NEXT:    movdqa %xmm3, %xmm4
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,3],xmm1[3,3]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
+; SSE-NEXT:    movdqa %xmm7, %xmm14
+; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[3,1],xmm1[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[2,0],xmm4[0,2]
+; SSE-NEXT:    movdqa %xmm7, %xmm4
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm3[1]
+; SSE-NEXT:    movaps %xmm1, %xmm15
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[1,1],xmm3[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[2,0],xmm4[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm7[1,3]
+; SSE-NEXT:    movlhps {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[0,2]
+; SSE-NEXT:    movdqa %xmm6, %xmm1
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE-NEXT:    movdqa %xmm4, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[3,3],xmm1[3,3]
+; SSE-NEXT:    movdqa %xmm13, %xmm7
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
+; SSE-NEXT:    movdqa %xmm9, %xmm12
+; SSE-NEXT:    movdqa %xmm7, %xmm9
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[3,1],xmm1[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[2,0],xmm5[0,2]
+; SSE-NEXT:    movdqa %xmm7, %xmm0
 ; SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
-; SSE-NEXT:    movdqa %xmm11, %xmm5
-; SSE-NEXT:    psrldq {{.*#+}} xmm5 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; SSE-NEXT:    movaps %xmm1, %xmm5
 ; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm4[1,1]
 ; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm15[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm6, %xmm1
-; SSE-NEXT:    pandn %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm6, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm1
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0]
-; SSE-NEXT:    movdqa %xmm11, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,1],xmm2[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm0[0,2]
-; SSE-NEXT:    movdqa %xmm15, %xmm0
-; SSE-NEXT:    pslld $16, %xmm0
-; SSE-NEXT:    movdqa %xmm9, %xmm5
-; SSE-NEXT:    pandn %xmm0, %xmm5
-; SSE-NEXT:    andps %xmm9, %xmm4
-; SSE-NEXT:    por %xmm4, %xmm5
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm2, %xmm0
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm8[1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm11[1,1,1,1,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm8[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm0[0,2]
-; SSE-NEXT:    andps %xmm6, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm15[0,0,1,1]
-; SSE-NEXT:    pandn %xmm0, %xmm6
-; SSE-NEXT:    por %xmm4, %xmm6
-; SSE-NEXT:    movlhps {{.*#+}} xmm8 = xmm8[0],xmm2[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,1],xmm2[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[2,0],xmm11[0,2]
-; SSE-NEXT:    andps %xmm9, %xmm8
-; SSE-NEXT:    pslldq {{.*#+}} xmm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm15[0,1,2,3,4,5]
-; SSE-NEXT:    pandn %xmm15, %xmm9
-; SSE-NEXT:    por %xmm8, %xmm9
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm7[1,3]
+; SSE-NEXT:    movlhps {{.*#+}} xmm4 = xmm4[0],xmm7[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[0,2]
+; SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm6 = xmm6[0],mem[0],xmm6[1],mem[1],xmm6[2],mem[2],xmm6[3],mem[3]
+; SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm6[3,3]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; SSE-NEXT:    movdqa %xmm13, %xmm1
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1],xmm6[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,2]
+; SSE-NEXT:    movdqa %xmm13, %xmm0
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; SSE-NEXT:    movaps %xmm6, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1],xmm2[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm0[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,1],xmm13[1,3]
+; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm13[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[0,2]
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    movdqa %xmm9, (%rax)
-; SSE-NEXT:    movdqa %xmm6, 16(%rax)
-; SSE-NEXT:    movdqa %xmm5, 48(%rax)
-; SSE-NEXT:    movdqa %xmm1, 64(%rax)
-; SSE-NEXT:    movdqa %xmm3, 96(%rax)
-; SSE-NEXT:    movdqa %xmm7, 112(%rax)
-; SSE-NEXT:    movdqa %xmm10, 144(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 160(%rax)
-; SSE-NEXT:    movaps %xmm12, 32(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 80(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 128(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 176(%rax)
+; SSE-NEXT:    movaps %xmm2, (%rax)
+; SSE-NEXT:    movaps %xmm7, 16(%rax)
+; SSE-NEXT:    movaps %xmm1, 32(%rax)
+; SSE-NEXT:    movaps %xmm4, 48(%rax)
+; SSE-NEXT:    movaps %xmm5, 64(%rax)
+; SSE-NEXT:    movaps %xmm9, 80(%rax)
+; SSE-NEXT:    movaps %xmm3, 96(%rax)
+; SSE-NEXT:    movaps %xmm15, 112(%rax)
+; SSE-NEXT:    movaps %xmm14, 128(%rax)
+; SSE-NEXT:    movaps %xmm8, 144(%rax)
+; SSE-NEXT:    movaps %xmm11, 160(%rax)
+; SSE-NEXT:    movaps %xmm10, 176(%rax)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rcx), %xmm8
 ; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm0
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm9
-; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm1
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm12[2,2,3,3]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm10[0,0,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm15
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm2
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm11
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[2,3,2,3]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2],ymm4[3,4],ymm1[5],ymm4[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vmovdqa 16(%r8), %xmm4
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1,2,3],xmm4[4,5],xmm7[6,7]
-; AVX1-NEXT:    vmovdqa 16(%r9), %xmm7
-; AVX1-NEXT:    vpslld $16, %xmm7, %xmm6
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm6[5],xmm0[6,7]
 ; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[2,1,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,2,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6],xmm1[7]
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm10[1,1,2,2]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm10[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,6,5,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3,4,5],xmm2[6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,4,6,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5,6],xmm2[7]
+; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm1
 ; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm15[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm15[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm8
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm0
 ; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[1,1,2,2]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm13[2,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm13, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX1-NEXT:    vmovdqa (%r8), %xmm2
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,6,5,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; AVX1-NEXT:    vextractf128 $1, %ymm14, %xmm6
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm0[0,1],xmm6[2,3,4,5],xmm0[6,7]
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm14
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm2, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
 ; AVX1-NEXT:    vmovdqa (%r9), %xmm0
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,4,6,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3,4,5,6],xmm5[7]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-NEXT:    vblendps {{.*#+}} xmm6 = xmm14[0],xmm6[1],xmm14[2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm14 = xmm6[0,1,2],xmm5[3],xmm6[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm12[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm12[1,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6],ymm5[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm3
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm7[0,0,1,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm12 = xmm3[0,1,2],xmm6[3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = xmm5[0,1],xmm4[0],xmm5[3]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[0,1,2,3,4,5]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm10 = xmm4[0,1,2,3,4],xmm5[5],xmm4[6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm7[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm8, %ymm8
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0],ymm6[1],ymm8[2,3],ymm6[4],ymm8[5,6],ymm6[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm3
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,0,1,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = xmm6[0,1],xmm2[0],xmm6[3]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm6[5],xmm4[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,2,3,3]
+; AVX1-NEXT:    vmovdqa 16(%r9), %xmm10
+; AVX1-NEXT:    vmovdqa (%r8), %xmm6
+; AVX1-NEXT:    vmovdqa 16(%r8), %xmm5
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm9, %ymm9
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm9[2],ymm2[3,4],ymm9[5],ymm2[6,7]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm12 = xmm13[2,3,2,3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm8[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm12, %ymm12
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm9[2,3,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm6, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5,6],ymm12[7]
+; AVX1-NEXT:    vmovdqa (%rcx), %xmm2
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[2,2,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[0,0,1,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm2, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm2[2],ymm12[3,4],ymm2[5],ymm12[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm8[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm8, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3],ymm5[4],ymm2[5,6],ymm5[7]
+; AVX1-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm5 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm5 = xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm3, %ymm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6],ymm3[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm15[2,2,3,3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm5, %ymm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[2,3,2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm13[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm1[2],ymm5[3,4],ymm1[5],ymm5[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm2[4,5],xmm5[6,7]
-; AVX1-NEXT:    vpslld $16, %xmm0, %xmm6
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5],xmm5[6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,1,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6],xmm0[7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm13[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm13, %ymm3, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm3, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovdqa %xmm0, 32(%rax)
-; AVX1-NEXT:    vmovdqa %xmm5, 48(%rax)
-; AVX1-NEXT:    vmovdqa %xmm4, (%rax)
-; AVX1-NEXT:    vmovdqa %xmm3, 16(%rax)
-; AVX1-NEXT:    vmovdqa %xmm10, 96(%rax)
-; AVX1-NEXT:    vmovdqa %xmm12, 112(%rax)
-; AVX1-NEXT:    vmovdqa %xmm14, 64(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 80(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 160(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 176(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 128(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 144(%rax)
+; AVX1-NEXT:    vmovaps %ymm1, (%rax)
+; AVX1-NEXT:    vmovaps %ymm0, 128(%rax)
+; AVX1-NEXT:    vmovaps %ymm2, 160(%rax)
+; AVX1-NEXT:    vmovaps %ymm12, 64(%rax)
+; AVX1-NEXT:    vmovaps %ymm11, 32(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 96(%rax)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: vf16:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
 ; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm11
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm15
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm9
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm10
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm5
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm6
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm12
+; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm14
+; AVX2-SLOW-NEXT:    vmovdqa (%r9), %ymm15
+; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm9
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm9[1,2,2,3]
+; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm7
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[1,2,2,3]
 ; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm0, %ymm1
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm7
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm7[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,0,2,1]
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm5[0,1,2,1]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,7,6,5]
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,5]
 ; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm1
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[2,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm2
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[0,2,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm3, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm12 = ymm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm15[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm12[0],ymm4[0],ymm12[1],ymm4[1],ymm12[2],ymm4[2],ymm12[3],ymm4[3],ymm12[8],ymm4[8],ymm12[9],ymm4[9],ymm12[10],ymm4[10],ymm12[11],ymm4[11]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm11[2,1,2,3,6,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm12[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm8[2,1,2,3,6,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm14[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm14 = ymm14[0],ymm12[0],ymm14[1],ymm12[1],ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[8],ymm12[8],ymm14[9],ymm12[9],ymm14[10],ymm12[10],ymm14[11],ymm12[11]
-; AVX2-SLOW-NEXT:    vmovdqa (%r9), %ymm12
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0,1],ymm4[2],ymm14[3,4],ymm4[5],ymm14[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm10[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0],ymm4[1,2],ymm14[3],ymm4[4,5],ymm14[6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm12[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm4, %ymm14, %ymm3
-; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[1,1,1,1]
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,2,3,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; AVX2-SLOW-NEXT:    vpbroadcastq %xmm4, %ymm4
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm1[12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,2,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm3, %ymm4, %ymm14
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm8[4],ymm11[4],ymm8[5],ymm11[5],ymm8[6],ymm11[6],ymm8[7],ymm11[7],ymm8[12],ymm11[12],ymm8[13],ymm11[13],ymm8[14],ymm11[14],ymm8[15],ymm11[15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3]
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm15[4],ymm9[4],ymm15[5],ymm9[5],ymm15[6],ymm9[6],ymm15[7],ymm9[7],ymm15[12],ymm9[12],ymm15[13],ymm9[13],ymm15[14],ymm9[14],ymm15[15],ymm9[15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,2,3,3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[1,1,1,1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm15[1,2,2,3,5,6,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm14[1,2,2,3,5,6,6,7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[1],ymm4[1],ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[8],ymm4[8],ymm6[9],ymm4[9],ymm6[10],ymm4[10],ymm6[11],ymm4[11]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm11[2,1,2,3,6,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm10[2,1,2,3,6,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm13[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm13[0],ymm6[0],ymm13[1],ymm6[1],ymm13[2],ymm6[2],ymm13[3],ymm6[3],ymm13[8],ymm6[8],ymm13[9],ymm6[9],ymm13[10],ymm6[10],ymm13[11],ymm6[11]
+; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm13
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm12[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm3, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6],ymm6[7]
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm6[0],ymm3[1],ymm6[1],ymm3[2],ymm6[2],ymm3[3],ymm6[3],ymm3[8],ymm6[8],ymm3[9],ymm6[9],ymm3[10],ymm6[10],ymm3[11],ymm6[11]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,0,2,2]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2,3],ymm4[4],ymm0[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,0,2,2]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
 ; AVX2-SLOW-NEXT:    vpbroadcastq %xmm1, %ymm1
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,0,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm1, %ymm1
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm8[0],ymm11[0],ymm8[1],ymm11[1],ymm8[2],ymm11[2],ymm8[3],ymm11[3],ymm8[8],ymm11[8],ymm8[9],ymm11[9],ymm8[10],ymm11[10],ymm8[11],ymm11[11]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm15[0],ymm9[0],ymm15[1],ymm9[1],ymm15[2],ymm9[2],ymm15[3],ymm9[3],ymm15[8],ymm9[8],ymm15[9],ymm9[9],ymm15[10],ymm9[10],ymm15[11],ymm9[11]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[8],ymm11[8],ymm10[9],ymm11[9],ymm10[10],ymm11[10],ymm10[11],ymm11[11]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,0,2,2,5,4,6,6]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3],ymm4[4],ymm1[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm10[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm12[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm1, %ymm4, %ymm1
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[8],ymm13[8],ymm12[9],ymm13[9],ymm12[10],ymm13[10],ymm12[11],ymm13[11]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[1,0,2,2,5,4,6,6]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[2],ymm15[2],ymm14[3],ymm15[3],ymm14[8],ymm15[8],ymm14[9],ymm15[9],ymm14[10],ymm15[10],ymm14[11],ymm15[11]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm14[4],ymm15[4],ymm14[5],ymm15[5],ymm14[6],ymm15[6],ymm14[7],ymm15[7],ymm14[12],ymm15[12],ymm14[13],ymm15[13],ymm14[14],ymm15[14],ymm14[15],ymm15[15]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[12],ymm13[12],ymm12[13],ymm13[13],ymm12[14],ymm13[14],ymm12[15],ymm13[15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,1,2,3,6,5,6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[12],ymm11[12],ymm10[13],ymm11[13],ymm10[14],ymm11[14],ymm10[15],ymm11[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
 ; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 160(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 96(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 160(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm14, 64(%rax)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 128(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 128(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 64(%rax)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, 32(%rax)
 ; AVX2-SLOW-NEXT:    vzeroupper
@@ -1086,111 +913,92 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
 ;
 ; AVX2-FAST-ALL-LABEL: vf16:
 ; AVX2-FAST-ALL:       # %bb.0:
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm10
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm15
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %ymm4
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %ymm11
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm11
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm14
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %ymm13
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %ymm15
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm8
 ; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm0, %xmm5, %xmm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm6
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm0, %xmm8, %xmm1
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm7
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
 ; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %xmm7
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %xmm5
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[1,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %xmm0
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,2,2,3]
 ; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %xmm1
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[2,1,3,3,4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %xmm2
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm8 = xmm2[0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm13, %ymm3, %ymm8, %ymm3
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm4
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm3, %ymm3
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm6 = <1,u,u,2,u,u,3,u>
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm3, %ymm6, %ymm3
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,1,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5,6],ymm6[7]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[1,1,1,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm9 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
 ; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm3, %ymm10, %ymm12
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm3, %ymm9, %ymm3
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm12[0],ymm3[1],ymm12[1],ymm3[2],ymm12[2],ymm3[3],ymm12[3],ymm3[8],ymm12[8],ymm3[9],ymm12[9],ymm3[10],ymm12[10],ymm3[11],ymm12[11]
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm12 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm14 = ymm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm15[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm14 = ymm14[0],ymm12[0],ymm14[1],ymm12[1],ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[8],ymm12[8],ymm14[9],ymm12[9],ymm14[10],ymm12[10],ymm14[11],ymm12[11]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %ymm12
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm3, %ymm11, %ymm6
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm3, %ymm10, %ymm3
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm6[0],ymm3[1],ymm6[1],ymm3[2],ymm6[2],ymm3[3],ymm6[3],ymm3[8],ymm6[8],ymm3[9],ymm6[9],ymm3[10],ymm6[10],ymm3[11],ymm6[11]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm6 = ymm15[1,2,2,3,5,6,6,7]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm12 = ymm13[1,2,2,3,5,6,6,7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm12[0],ymm6[0],ymm12[1],ymm6[1],ymm12[2],ymm6[2],ymm12[3],ymm6[3],ymm12[8],ymm6[8],ymm12[9],ymm6[9],ymm12[10],ymm6[10],ymm12[11],ymm6[11]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %ymm12
 ; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm14[2],ymm3[3,4],ymm14[5],ymm3[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm11[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0],ymm3[1,2],ymm14[3],ymm3[4,5],ymm14[6],ymm3[7]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm14 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm13, %ymm3, %ymm14, %ymm3
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm14 = <1,u,u,2,u,u,3,u>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm3, %ymm14, %ymm3
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[1,1,1,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm14[2],ymm3[3,4],ymm14[5],ymm3[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm14 = xmm1[12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm14[1],ymm3[2,3],ymm14[4],ymm3[5,6],ymm14[7]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm14 = xmm2[8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm3, %ymm14, %ymm14
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm15[4],ymm4[4],ymm15[5],ymm4[5],ymm15[6],ymm4[6],ymm15[7],ymm4[7],ymm15[12],ymm4[12],ymm15[13],ymm4[13],ymm15[14],ymm4[14],ymm15[15],ymm4[15]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm13 = <5,u,u,6,u,u,7,u>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm3, %ymm13, %ymm3
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm13 = ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[3,3,3,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm13[2],ymm3[3,4],ymm13[5],ymm3[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm13 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm13[1],ymm3[2,3],ymm13[4],ymm3[5,6],ymm13[7]
-; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm13 = ymm12[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm3, %ymm13, %ymm3
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [1,0,2,2,1,0,2,2]
-; AVX2-FAST-ALL-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm7, %ymm0
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6],ymm0[7]
-; AVX2-FAST-ALL-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm1, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,0,2,1,4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm1, %ymm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm15[0],ymm4[0],ymm15[1],ymm4[1],ymm15[2],ymm4[2],ymm15[3],ymm4[3],ymm15[8],ymm4[8],ymm15[9],ymm4[9],ymm15[10],ymm4[10],ymm15[11],ymm4[11]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,4,u,u,5,u,u,6>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm4, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5,6],ymm1[7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm11[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm12[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm4, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0],ymm3[1],ymm6[2,3],ymm3[4],ymm6[5,6],ymm3[7]
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[2],ymm6[2],ymm2[3],ymm6[3],ymm2[8],ymm6[8],ymm2[9],ymm6[9],ymm2[10],ymm6[10],ymm2[11],ymm6[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,0,2,2,1,0,2,2]
+; AVX2-FAST-ALL-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm14[0],ymm12[0],ymm14[1],ymm12[1],ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[8],ymm12[8],ymm14[9],ymm12[9],ymm14[10],ymm12[10],ymm14[11],ymm12[11]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,4,u,u,5,u,u,6>
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[8],ymm11[8],ymm10[9],ymm11[9],ymm10[10],ymm11[10],ymm10[11],ymm11[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm13[0],ymm15[0],ymm13[1],ymm15[1],ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[8],ymm15[8],ymm13[9],ymm15[9],ymm13[10],ymm15[10],ymm13[11],ymm15[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm14[4],ymm12[4],ymm14[5],ymm12[5],ymm14[6],ymm12[6],ymm14[7],ymm12[7],ymm14[12],ymm12[12],ymm14[13],ymm12[13],ymm14[14],ymm12[14],ymm14[15],ymm12[15]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = <5,u,u,6,u,u,7,u>
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm13[4],ymm15[4],ymm13[5],ymm15[5],ymm13[6],ymm15[6],ymm13[7],ymm15[7],ymm13[12],ymm15[12],ymm13[13],ymm15[13],ymm13[14],ymm15[14],ymm13[15],ymm15[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[12],ymm11[12],ymm10[13],ymm11[13],ymm10[14],ymm11[14],ymm10[15],ymm11[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[3,3,3,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
 ; AVX2-FAST-ALL-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm2, 160(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm1, 96(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm3, 160(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, (%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm14, 64(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 128(%rax)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm3, 128(%rax)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm9, 64(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 32(%rax)
 ; AVX2-FAST-ALL-NEXT:    vzeroupper
@@ -1198,119 +1006,93 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
 ;
 ; AVX2-FAST-PERLANE-LABEL: vf16:
 ; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm15
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm12
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm13
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm15
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %ymm12
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm6
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm6, %xmm7
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm0[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %xmm7
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[1,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,2,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm9
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm9[2,1,3,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm8 = xmm6[0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm2, %ymm3, %ymm8, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm3, %ymm10, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm15, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm3, %ymm15, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm14[0],ymm3[1],ymm14[1],ymm3[2],ymm14[2],ymm3[3],ymm14[3],ymm3[8],ymm14[8],ymm3[9],ymm14[9],ymm3[10],ymm14[10],ymm3[11],ymm14[11]
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm14 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm15 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm14 = ymm15[0],ymm14[0],ymm15[1],ymm14[1],ymm15[2],ymm14[2],ymm15[3],ymm14[3],ymm15[8],ymm14[8],ymm15[9],ymm14[9],ymm15[10],ymm14[10],ymm15[11],ymm14[11]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %ymm15
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm14[2],ymm3[3,4],ymm14[5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm12, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm12[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0],ymm3[1,2],ymm14[3],ymm3[4,5],ymm14[6],ymm3[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm14 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm2, %ymm3, %ymm14, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[1,1,1,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm4, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0],ymm8[1],ymm2[2,3],ymm8[4],ymm2[5,6],ymm8[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm4, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3,4],ymm4[5],ymm8[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,2,3,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm12[2],ymm4[3,4],ymm12[5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm12 = xmm9[12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm12[1],ymm4[2,3],ymm12[4],ymm4[5,6],ymm12[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm12 = xmm6[8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm14 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm14, %ymm4, %ymm12, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm12 = ymm8[4],ymm10[4],ymm8[5],ymm10[5],ymm8[6],ymm10[6],ymm8[7],ymm10[7],ymm8[12],ymm10[12],ymm8[13],ymm10[13],ymm8[14],ymm10[14],ymm8[15],ymm10[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm8, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[3,3,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm13[4],ymm11[4],ymm13[5],ymm11[5],ymm13[6],ymm11[6],ymm13[7],ymm11[7],ymm13[12],ymm11[12],ymm13[13],ymm11[13],ymm13[14],ymm11[14],ymm13[15],ymm11[15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[1,2,3,3,5,6,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm12[2],ymm8[3,4],ymm12[5],ymm8[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm12 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0],ymm12[1],ymm8[2,3],ymm12[4],ymm8[5,6],ymm12[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm12 = ymm15[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm14, %ymm8, %ymm12, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[1,1,1,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm10 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm13[0],ymm15[0],ymm13[1],ymm15[1],ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[8],ymm15[8],ymm13[9],ymm15[9],ymm13[10],ymm15[10],ymm13[11],ymm15[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[8],ymm12[8],ymm11[9],ymm12[9],ymm11[10],ymm12[10],ymm11[11],ymm12[11]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,2,5,4,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm14[0],ymm9[0],ymm14[1],ymm9[1],ymm14[2],ymm9[2],ymm14[3],ymm9[3],ymm14[8],ymm9[8],ymm14[9],ymm9[9],ymm14[10],ymm9[10],ymm14[11],ymm9[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[0,0,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm0, %ymm1, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm3[0],ymm10[0],ymm3[1],ymm10[1],ymm3[2],ymm10[2],ymm3[3],ymm10[3],ymm3[8],ymm10[8],ymm3[9],ymm10[9],ymm3[10],ymm10[10],ymm3[11],ymm10[11]
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm13[0],ymm11[0],ymm13[1],ymm11[1],ymm13[2],ymm11[2],ymm13[3],ymm11[3],ymm13[8],ymm11[8],ymm13[9],ymm11[9],ymm13[10],ymm11[10],ymm13[11],ymm11[11]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[1,0,2,2,5,4,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3],ymm5[4],ymm1[5,6],ymm5[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm4[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm15[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm1, %ymm5, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 96(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm8, 160(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 128(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, (%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm14[4],ymm9[4],ymm14[5],ymm9[5],ymm14[6],ymm9[6],ymm14[7],ymm9[7],ymm14[12],ymm9[12],ymm14[13],ymm9[13],ymm14[14],ymm9[14],ymm14[15],ymm9[15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,1,2,3,6,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[12],ymm12[12],ymm11[13],ymm12[13],ymm11[14],ymm12[14],ymm11[15],ymm12[15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm13[4],ymm15[4],ymm13[5],ymm15[5],ymm13[6],ymm15[6],ymm13[7],ymm15[7],ymm13[12],ymm15[12],ymm13[13],ymm15[13],ymm13[14],ymm15[14],ymm13[15],ymm15[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm15, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm13, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm3 = ymm9[1,2,2,3,5,6,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[1,2,2,3,5,6,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, 128(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 160(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 96(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm8, (%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm10, 64(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 32(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
@@ -1322,21 +1104,21 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512-NEXT:    vmovdqa (%r8), %ymm2
-; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
 ; AVX512-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
-; AVX512-NEXT:    vinserti64x4 $1, (%r9), %zmm2, %zmm2
+; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,16,32,48,u,u,1,17,33,49,u,u,2,18,34,50,u,u,3,19,35,51,u,u,4,20,36,52,u,u,5,21>
 ; AVX512-NEXT:    vpermi2w %zmm1, %zmm0, %zmm3
+; AVX512-NEXT:    vinserti64x4 $1, (%r9), %zmm2, %zmm2
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,32,48,6,7,8,9,33,49,12,13,14,15,34,50,18,19,20,21,35,51,24,25,26,27,36,52,30,31]
 ; AVX512-NEXT:    vpermi2w %zmm2, %zmm3, %zmm4
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <5,21,u,u,38,54,6,22,u,u,39,55,7,23,u,u,40,56,8,24,u,u,41,57,9,25,u,u,42,58,10,26>
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm1, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,37,53,4,5,6,7,38,54,10,11,12,13,39,55,16,17,18,19,40,56,22,23,24,25,41,57,28,29,30,31]
-; AVX512-NEXT:    vpermi2w %zmm2, %zmm3, %zmm5
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,u,11,27,43,59,u,u,12,28,44,60,u,u,13,29,45,61,u,u,14,30,46,62,u,u,15,31,47,63,u,u>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm0, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [42,58,2,3,4,5,43,59,8,9,10,11,44,60,14,15,16,17,45,61,20,21,22,23,46,62,26,27,28,29,47,63]
-; AVX512-NEXT:    vpermi2w %zmm2, %zmm3, %zmm0
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <5,21,37,53,u,u,6,22,38,54,u,u,7,23,39,55,u,u,8,24,40,56,u,u,9,25,41,57,u,u,10,26>
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,38,54,6,7,8,9,39,55,12,13,14,15,40,56,18,19,20,21,41,57,24,25,26,27,42,58,30,31]
+; AVX512-NEXT:    vpermi2w %zmm0, %zmm3, %zmm5
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <10,26,43,59,u,u,11,27,44,60,u,u,12,28,45,61,u,u,13,29,46,62,u,u,14,30,47,63,u,u,15,31>
+; AVX512-NEXT:    vpermi2w %zmm0, %zmm2, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,43,59,6,7,8,9,44,60,12,13,14,15,45,61,18,19,20,21,46,62,24,25,26,27,47,63,30,31]
+; AVX512-NEXT:    vpermi2w %zmm1, %zmm3, %zmm0
 ; AVX512-NEXT:    vmovdqu64 %zmm0, 128(%rax)
 ; AVX512-NEXT:    vmovdqu64 %zmm5, 64(%rax)
 ; AVX512-NEXT:    vmovdqu64 %zmm4, (%rax)
@@ -1365,1634 +1147,1143 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
 define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>* %in.vecptr2, <32 x i16>* %in.vecptr3, <32 x i16>* %in.vecptr4, <32 x i16>* %in.vecptr5, <192 x i16>* %out.vec) nounwind {
 ; SSE-LABEL: vf32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    subq $296, %rsp # imm = 0x128
-; SSE-NEXT:    movdqa (%rdi), %xmm0
-; SSE-NEXT:    movdqa 16(%rdi), %xmm13
-; SSE-NEXT:    movdqa (%rsi), %xmm2
-; SSE-NEXT:    movdqa 16(%rsi), %xmm14
+; SSE-NEXT:    subq $104, %rsp
+; SSE-NEXT:    movdqa (%rdi), %xmm9
+; SSE-NEXT:    movdqa 16(%rdi), %xmm12
+; SSE-NEXT:    movdqa (%rsi), %xmm10
+; SSE-NEXT:    movdqa 16(%rsi), %xmm8
 ; SSE-NEXT:    movdqa (%rdx), %xmm1
-; SSE-NEXT:    movdqa 16(%rdx), %xmm9
-; SSE-NEXT:    movdqa (%rcx), %xmm7
-; SSE-NEXT:    movdqa 16(%rcx), %xmm15
+; SSE-NEXT:    movdqa 16(%rdx), %xmm6
+; SSE-NEXT:    movdqa (%rcx), %xmm13
+; SSE-NEXT:    movdqa 16(%rcx), %xmm11
 ; SSE-NEXT:    movdqa (%r8), %xmm5
-; SSE-NEXT:    movdqa (%r9), %xmm12
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm0, %xmm8
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
-; SSE-NEXT:    movdqa %xmm8, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,3],xmm4[3,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm5[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,2],xmm4[0,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0,1,3]
-; SSE-NEXT:    movaps {{.*#+}} xmm10 = [65535,0,65535,65535,65535,65535,65535,0]
-; SSE-NEXT:    andps %xmm10, %xmm3
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm12[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
-; SSE-NEXT:    movaps %xmm10, %xmm4
-; SSE-NEXT:    andnps %xmm6, %xmm4
-; SSE-NEXT:    orps %xmm3, %xmm4
-; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,3],xmm1[3,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm5[0,1,2,3,6,5,7,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,2],xmm3[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0,1,3]
-; SSE-NEXT:    andps %xmm10, %xmm2
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm12[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
-; SSE-NEXT:    movaps %xmm10, %xmm4
-; SSE-NEXT:    andnps %xmm3, %xmm4
-; SSE-NEXT:    orps %xmm2, %xmm4
-; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm9, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm13, %xmm11
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm14[0],xmm11[1],xmm14[1],xmm11[2],xmm14[2],xmm11[3],xmm14[3]
-; SSE-NEXT:    movdqa %xmm11, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,3],xmm0[3,3]
-; SSE-NEXT:    movdqa 16(%r8), %xmm4
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm4[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,2],xmm6[0,1]
-; SSE-NEXT:    movdqa 16(%r9), %xmm0
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm0[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
-; SSE-NEXT:    movaps %xmm10, %xmm7
-; SSE-NEXT:    andnps %xmm6, %xmm7
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0,1,3]
-; SSE-NEXT:    andps %xmm10, %xmm3
-; SSE-NEXT:    orps %xmm3, %xmm7
-; SSE-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
-; SSE-NEXT:    movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm13, %xmm3
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,3],xmm9[3,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm4[0,1,2,3,6,5,7,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,2],xmm6[2,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm0[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
-; SSE-NEXT:    movaps %xmm10, %xmm0
-; SSE-NEXT:    andnps %xmm6, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0,1,3]
-; SSE-NEXT:    andps %xmm10, %xmm3
-; SSE-NEXT:    orps %xmm3, %xmm0
-; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 32(%rdx), %xmm2
-; SSE-NEXT:    movdqa 32(%rcx), %xmm15
-; SSE-NEXT:    movdqa %xmm2, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 32(%rdi), %xmm9
-; SSE-NEXT:    movdqa 32(%rsi), %xmm14
-; SSE-NEXT:    movdqa %xmm9, %xmm3
+; SSE-NEXT:    movdqa (%r9), %xmm14
+; SSE-NEXT:    movdqa %xmm9, %xmm4
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3]
+; SSE-NEXT:    movdqa %xmm5, %xmm3
 ; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,3],xmm0[3,3]
-; SSE-NEXT:    movdqa 32(%r8), %xmm6
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm6[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,2],xmm0[0,1]
-; SSE-NEXT:    movdqa 32(%r9), %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    movdqa %xmm1, %xmm7
-; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; SSE-NEXT:    movaps %xmm10, %xmm1
-; SSE-NEXT:    andnps %xmm0, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0,1,3]
-; SSE-NEXT:    andps %xmm10, %xmm3
-; SSE-NEXT:    orps %xmm3, %xmm1
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm9, %xmm0
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3],xmm2[3,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,6,5,7,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,2],xmm3[2,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm7[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
-; SSE-NEXT:    movaps %xmm10, %xmm1
-; SSE-NEXT:    andnps %xmm3, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE-NEXT:    andps %xmm10, %xmm0
-; SSE-NEXT:    orps %xmm0, %xmm1
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 48(%rdx), %xmm0
-; SSE-NEXT:    movdqa 48(%rcx), %xmm3
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 48(%rdi), %xmm2
-; SSE-NEXT:    movdqa 48(%rsi), %xmm14
-; SSE-NEXT:    movdqa %xmm2, %xmm15
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
-; SSE-NEXT:    movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[2,3],xmm1[3,3]
-; SSE-NEXT:    movdqa 48(%r8), %xmm9
-; SSE-NEXT:    pshuflw {{.*#+}} xmm13 = xmm9[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[1,2],xmm13[0,1]
-; SSE-NEXT:    movdqa 48(%r9), %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm1[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    movdqa %xmm1, %xmm13
-; SSE-NEXT:    movdqa %xmm1, (%rsp) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[0,1,2,1]
-; SSE-NEXT:    movaps %xmm10, %xmm1
-; SSE-NEXT:    andnps %xmm7, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[2,0,1,3]
-; SSE-NEXT:    andps %xmm10, %xmm15
-; SSE-NEXT:    orps %xmm15, %xmm1
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,3],xmm1[3,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm9[0,1,2,3,6,5,7,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,2],xmm7[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0,1,3]
-; SSE-NEXT:    andps %xmm10, %xmm2
-; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm13[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
-; SSE-NEXT:    andnps %xmm7, %xmm10
-; SSE-NEXT:    orps %xmm2, %xmm10
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    movdqa %xmm3, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[0,1],xmm4[1,3]
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3]
 ; SSE-NEXT:    movdqa %xmm2, %xmm0
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm8[0]
-; SSE-NEXT:    movdqa %xmm5, %xmm7
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[0,1],xmm8[1,3]
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm7[0,2]
-; SSE-NEXT:    movdqa %xmm12, %xmm7
-; SSE-NEXT:    pslldq {{.*#+}} xmm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[0,1,2,3,4,5]
-; SSE-NEXT:    movdqa {{.*#+}} xmm14 = [65535,65535,65535,65535,65535,0,65535,65535]
-; SSE-NEXT:    movdqa %xmm14, %xmm1
-; SSE-NEXT:    pandn %xmm7, %xmm1
-; SSE-NEXT:    andps %xmm14, %xmm0
-; SSE-NEXT:    por %xmm0, %xmm1
-; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm8 = xmm8[1],xmm2[1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm5[1,1,1,1,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1],xmm2[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm8[0,2]
-; SSE-NEXT:    movdqa {{.*#+}} xmm15 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm12[0,0,1,1]
-; SSE-NEXT:    movdqa %xmm15, %xmm0
-; SSE-NEXT:    pandn %xmm8, %xmm0
-; SSE-NEXT:    andps %xmm15, %xmm7
-; SSE-NEXT:    por %xmm7, %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm3, %xmm7
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movlhps {{.*#+}} xmm7 = xmm7[0],xmm0[0]
-; SSE-NEXT:    movdqa %xmm5, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,1],xmm0[1,3]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm1[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm12[2,2,3,3]
-; SSE-NEXT:    pslld $16, %xmm12
-; SSE-NEXT:    movdqa %xmm14, %xmm0
-; SSE-NEXT:    pandn %xmm12, %xmm0
-; SSE-NEXT:    andps %xmm14, %xmm7
-; SSE-NEXT:    por %xmm7, %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
-; SSE-NEXT:    psrldq {{.*#+}} xmm5 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm3[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm2[0,2]
-; SSE-NEXT:    movdqa %xmm15, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
-; SSE-NEXT:    andps %xmm15, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm2, %xmm1
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm11[0]
-; SSE-NEXT:    movdqa %xmm4, %xmm5
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,1],xmm11[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm5[0,2]
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm0, %xmm5
-; SSE-NEXT:    pslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm5[0,1,2,3,4,5]
-; SSE-NEXT:    movdqa %xmm14, %xmm3
-; SSE-NEXT:    pandn %xmm5, %xmm3
-; SSE-NEXT:    andps %xmm14, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm3
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm2[1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm4[1,1,1,1,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm11[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[0,0,1,1]
-; SSE-NEXT:    movdqa %xmm0, %xmm3
-; SSE-NEXT:    movdqa %xmm15, %xmm0
-; SSE-NEXT:    pandn %xmm7, %xmm0
-; SSE-NEXT:    andps %xmm15, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm5, %xmm1
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    movdqa %xmm4, %xmm7
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,1],xmm0[1,3]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm7[0,2]
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm7 = xmm7[1],xmm2[1]
 ; SSE-NEXT:    movdqa %xmm3, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; SSE-NEXT:    pslld $16, %xmm0
-; SSE-NEXT:    movdqa %xmm14, %xmm7
-; SSE-NEXT:    pandn %xmm0, %xmm7
-; SSE-NEXT:    andps %xmm14, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm7
-; SSE-NEXT:    movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm5[1]
-; SSE-NEXT:    psrldq {{.*#+}} xmm4 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm5[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm7[0,2]
+; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 16(%r8), %xmm15
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,1],xmm3[2,3]
+; SSE-NEXT:    movdqa 16(%r9), %xmm3
 ; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm2[0,2]
-; SSE-NEXT:    movdqa %xmm15, %xmm8
-; SSE-NEXT:    pandn %xmm3, %xmm8
-; SSE-NEXT:    andps %xmm15, %xmm4
-; SSE-NEXT:    por %xmm4, %xmm8
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm4, %xmm1
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
+; SSE-NEXT:    movdqa %xmm5, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm9[1,3]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm9[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm9, %xmm0
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-NEXT:    movdqa %xmm5, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm5[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[3,1],xmm5[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[2,0],xmm1[0,2]
+; SSE-NEXT:    movaps %xmm9, (%rsp) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm12, %xmm13
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3]
+; SSE-NEXT:    movdqa %xmm15, %xmm0
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm13[1,3]
 ; SSE-NEXT:    movdqa %xmm6, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,3]
-; SSE-NEXT:    movaps %xmm0, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[0,2]
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm0, %xmm5
-; SSE-NEXT:    pslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm5[0,1,2,3,4,5]
-; SSE-NEXT:    movdqa %xmm14, %xmm7
-; SSE-NEXT:    pandn %xmm5, %xmm7
-; SSE-NEXT:    andps %xmm14, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm7
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm6[1,1,1,1,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,0,1,1]
-; SSE-NEXT:    movdqa %xmm0, %xmm3
-; SSE-NEXT:    movdqa %xmm15, %xmm11
-; SSE-NEXT:    pandn %xmm5, %xmm11
-; SSE-NEXT:    andps %xmm15, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm11
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm4, %xmm5
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm13[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[0,2]
+; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm13, %xmm1
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
+; SSE-NEXT:    movdqa %xmm0, %xmm4
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm2[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[0,2]
+; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3],xmm0[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[3,1],xmm0[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[2,0],xmm2[0,2]
+; SSE-NEXT:    movdqa 32(%rdi), %xmm10
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm3[4],xmm15[5],xmm3[5],xmm15[6],xmm3[6],xmm15[7],xmm3[7]
+; SSE-NEXT:    movdqa %xmm15, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm12[1,3]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
 ; SSE-NEXT:    movdqa %xmm6, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,1],xmm0[1,3]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm1[0,2]
-; SSE-NEXT:    movdqa %xmm3, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm13 = xmm3[2,2,3,3]
-; SSE-NEXT:    pslld $16, %xmm0
-; SSE-NEXT:    movdqa %xmm14, %xmm3
-; SSE-NEXT:    pandn %xmm0, %xmm3
-; SSE-NEXT:    andps %xmm14, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm3
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1]
-; SSE-NEXT:    psrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,1],xmm4[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0],xmm2[0,2]
-; SSE-NEXT:    movdqa %xmm15, %xmm12
-; SSE-NEXT:    pandn %xmm13, %xmm12
-; SSE-NEXT:    andps %xmm15, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm12
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, %xmm5
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
-; SSE-NEXT:    movdqa %xmm9, %xmm6
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm6[0,2]
-; SSE-NEXT:    movdqa (%rsp), %xmm4 # 16-byte Reload
-; SSE-NEXT:    movdqa %xmm4, %xmm6
-; SSE-NEXT:    pslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm6[0,1,2,3,4,5]
-; SSE-NEXT:    movdqa %xmm14, %xmm13
-; SSE-NEXT:    pandn %xmm6, %xmm13
-; SSE-NEXT:    andps %xmm14, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm13
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm9[1,1,1,1,4,5,6,7]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[0,2]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,0,1,1]
-; SSE-NEXT:    movdqa %xmm15, %xmm6
-; SSE-NEXT:    pandn %xmm1, %xmm6
-; SSE-NEXT:    andps %xmm15, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm6
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm2, %xmm1
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm12[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm12, %xmm0
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm6[1]
+; SSE-NEXT:    movdqa %xmm15, %xmm1
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm6[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 32(%rsi), %xmm8
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,3],xmm15[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[3,1],xmm15[2,3]
+; SSE-NEXT:    movdqa 32(%r8), %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[2,0],xmm6[0,2]
+; SSE-NEXT:    movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm10, %xmm9
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
+; SSE-NEXT:    movdqa 32(%r9), %xmm3
+; SSE-NEXT:    movdqa %xmm0, %xmm4
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT:    movdqa %xmm4, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,1],xmm9[1,3]
+; SSE-NEXT:    movdqa 32(%rdx), %xmm7
+; SSE-NEXT:    movdqa 32(%rcx), %xmm6
+; SSE-NEXT:    movdqa %xmm7, %xmm1
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm9[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm5[0,2]
+; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    movdqa %xmm9, %xmm5
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,1],xmm0[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm5[0,2]
-; SSE-NEXT:    andps %xmm14, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[2,2,3,3]
-; SSE-NEXT:    pslld $16, %xmm4
-; SSE-NEXT:    pandn %xmm4, %xmm14
-; SSE-NEXT:    por %xmm1, %xmm14
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
-; SSE-NEXT:    psrldq {{.*#+}} xmm9 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[1,1],xmm2[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[2,0],xmm0[0,2]
-; SSE-NEXT:    andps %xmm15, %xmm9
-; SSE-NEXT:    pandn %xmm5, %xmm15
-; SSE-NEXT:    por %xmm9, %xmm15
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm1[1]
+; SSE-NEXT:    movdqa %xmm4, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm5[0,2]
+; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm4[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[3,1],xmm4[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[2,0],xmm1[0,2]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm10[1,3]
+; SSE-NEXT:    movdqa %xmm7, %xmm12
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm12 = xmm12[0],xmm10[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[2,0],xmm2[0,2]
+; SSE-NEXT:    movdqa %xmm10, %xmm2
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm7[1]
+; SSE-NEXT:    movdqa %xmm0, %xmm11
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[1,1],xmm7[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[2,0],xmm2[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,3],xmm0[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[3,1],xmm0[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[2,0],xmm7[0,2]
+; SSE-NEXT:    movdqa 48(%rdi), %xmm0
+; SSE-NEXT:    movdqa 48(%rsi), %xmm1
+; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE-NEXT:    movdqa 48(%r8), %xmm7
+; SSE-NEXT:    movdqa 48(%r9), %xmm15
+; SSE-NEXT:    movdqa %xmm7, %xmm4
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3]
+; SSE-NEXT:    movdqa %xmm4, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[1,3]
+; SSE-NEXT:    movdqa 48(%rdx), %xmm1
+; SSE-NEXT:    movdqa 48(%rcx), %xmm14
+; SSE-NEXT:    movdqa %xmm1, %xmm6
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3]
+; SSE-NEXT:    movdqa %xmm6, %xmm8
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm2[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[2,0],xmm5[0,2]
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm6[1]
+; SSE-NEXT:    movdqa %xmm4, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm6[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,3],xmm4[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,1],xmm4[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[0,2]
+; SSE-NEXT:    punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm15[4],xmm7[5],xmm15[5],xmm7[6],xmm15[6],xmm7[7],xmm15[7]
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE-NEXT:    movdqa %xmm7, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[1,3]
+; SSE-NEXT:    movdqa %xmm1, %xmm4
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[0,2]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; SSE-NEXT:    movdqa %xmm7, %xmm6
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,1],xmm1[1,1]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0],xmm3[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm7[3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm7[2,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,2]
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    movdqa %xmm15, 352(%rax)
-; SSE-NEXT:    movdqa %xmm14, 336(%rax)
-; SSE-NEXT:    movdqa %xmm6, 304(%rax)
-; SSE-NEXT:    movdqa %xmm13, 288(%rax)
-; SSE-NEXT:    movdqa %xmm12, 256(%rax)
-; SSE-NEXT:    movdqa %xmm3, 240(%rax)
-; SSE-NEXT:    movdqa %xmm11, 208(%rax)
-; SSE-NEXT:    movdqa %xmm7, 192(%rax)
-; SSE-NEXT:    movdqa %xmm8, 160(%rax)
+; SSE-NEXT:    movaps %xmm0, 368(%rax)
+; SSE-NEXT:    movaps %xmm6, 352(%rax)
+; SSE-NEXT:    movaps %xmm4, 336(%rax)
+; SSE-NEXT:    movaps %xmm2, 320(%rax)
+; SSE-NEXT:    movaps %xmm5, 304(%rax)
+; SSE-NEXT:    movaps %xmm8, 288(%rax)
+; SSE-NEXT:    movaps %xmm10, 272(%rax)
+; SSE-NEXT:    movaps %xmm11, 256(%rax)
+; SSE-NEXT:    movaps %xmm12, 240(%rax)
+; SSE-NEXT:    movaps %xmm9, 224(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 208(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 192(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 176(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 160(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 144(%rax)
+; SSE-NEXT:    movaps %xmm13, 128(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 112(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 96(%rax)
+; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 80(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 64(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 48(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 32(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 16(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, (%rax)
-; SSE-NEXT:    movaps %xmm10, 368(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 320(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 272(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 224(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 176(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 128(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 80(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 32(%rax)
-; SSE-NEXT:    addq $296, %rsp # imm = 0x128
+; SSE-NEXT:    addq $104, %rsp
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $184, %rsp
-; AVX1-NEXT:    vmovdqa 32(%rcx), %xmm8
-; AVX1-NEXT:    vmovdqa 48(%rcx), %xmm0
-; AVX1-NEXT:    vmovdqa 32(%rdx), %xmm10
-; AVX1-NEXT:    vmovdqa 48(%rdx), %xmm1
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm13[2,2,3,3]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[0,0,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovdqa 32(%rsi), %xmm11
-; AVX1-NEXT:    vmovdqa 48(%rsi), %xmm2
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm12
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm6
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm14[2,3,2,3]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm6[0,1],ymm0[2],ymm6[3,4],ymm0[5],ymm6[6,7]
-; AVX1-NEXT:    vmovdqa 48(%r8), %xmm6
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[2,1,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm3 = xmm0[0],xmm7[1,2],xmm0[3]
-; AVX1-NEXT:    vmovdqa 48(%r9), %xmm0
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3,4,5,6],xmm4[7]
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpslld $16, %xmm0, %xmm4
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5],xmm3[6,7]
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,2]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,6,5,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3,4,5],xmm2[6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,6,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5,6],xmm2[7]
+; AVX1-NEXT:    subq $88, %rsp
+; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm1
 ; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[1,1,2,2]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[2,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm9, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
-; AVX1-NEXT:    vmovdqa 32(%r8), %xmm7
-; AVX1-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm3[0],xmm1[1],xmm3[2,3]
-; AVX1-NEXT:    vmovdqa 32(%r9), %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm5[3],xmm4[4,5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm7[0,1,2,3,6,5,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3,4,5],xmm4[6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,6,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3,4,5,6],xmm4[7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm3
 ; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm13[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm13[1,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm14[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm14, %ymm4, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm4 = xmm3[0,1],xmm6[0],xmm3[3]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5],xmm4[6,7]
+; AVX1-NEXT:    vmovdqa 32(%rsi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm4
 ; AVX1-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm5 = xmm3[0,1],xmm7[0],xmm3[3]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5],xmm5[6,7]
+; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm2
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm3, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7]
+; AVX1-NEXT:    vmovdqa 16(%r9), %xmm5
 ; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[0,0,1,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT:    vmovdqa 32(%r9), %xmm3
+; AVX1-NEXT:    vmovdqa 16(%r8), %xmm6
+; AVX1-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa 32(%r8), %xmm4
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm14 = xmm9[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm14, %ymm14
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm14[2],ymm0[3,4],ymm14[5],ymm0[6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm14[2,3,2,3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[2,3,2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm9[0,1,0,1]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm15[2,3,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm7[2,1,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm3 = xmm3[0],xmm0[1,2],xmm3[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm1[0,2,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3,4,5,6],xmm4[7]
-; AVX1-NEXT:    vmovdqa %xmm3, (%rsp) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm7[4,5],xmm0[6,7]
-; AVX1-NEXT:    vpslld $16, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7]
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7]
+; AVX1-NEXT:    vmovdqa 32(%rcx), %xmm4
+; AVX1-NEXT:    vmovdqa 32(%rdx), %xmm0
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm8[2,2,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[0,0,1,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm13, %ymm4, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX1-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovdqa 48(%rcx), %xmm13
+; AVX1-NEXT:    vmovdqa 48(%rdx), %xmm7
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm3
+; AVX1-NEXT:    vmovdqa 48(%r9), %xmm5
+; AVX1-NEXT:    vmovdqa 48(%r8), %xmm4
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm12, %ymm2, %ymm12
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm3[0],ymm12[1],ymm3[2,3],ymm12[4],ymm3[5,6],ymm12[7]
+; AVX1-NEXT:    vmovdqa 48(%rsi), %xmm3
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm1
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm11 = xmm0[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm11
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm11[2],ymm12[3,4],ymm11[5],ymm12[6,7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm8[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm8[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm14[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm14, %ymm4, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5,6],ymm1[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm15[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm15, %ymm4, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
-; AVX1-NEXT:    vmovdqa 16(%r8), %xmm3
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm5 = xmm2[0],xmm0[1,2],xmm2[3]
-; AVX1-NEXT:    vmovdqa 16(%r9), %xmm2
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm2[0,2,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3,4,5,6],xmm6[7]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5],xmm0[6,7]
-; AVX1-NEXT:    vpslld $16, %xmm2, %xmm5
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5],xmm0[6,7]
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,2,2]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX1-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3],xmm1[4,5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,6,5,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,6,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6],xmm1[7]
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa (%rcx), %xmm14
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm11
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm11[4],xmm14[4],xmm11[5],xmm14[5],xmm11[6],xmm14[6],xmm11[7],xmm14[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm10[1,1,2,2]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm10[2,2,3,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm15 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm8[2,3,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm9
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm8
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX1-NEXT:    vmovdqa (%r8), %xmm1
-; AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-NEXT:    vblendps {{.*#+}} xmm13 = xmm12[0],xmm0[1],xmm12[2,3]
-; AVX1-NEXT:    vmovdqa (%r9), %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm13[0,1,2],xmm15[3],xmm13[4,5,6,7]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm12, %xmm12
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,6,5,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm12[2,3,4,5],xmm7[6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm0[0,1,2,3,4,6,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm13 = xmm7[0],xmm6[1],xmm7[2,3,4,5,6],xmm6[7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm5[0,0,1,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm5[1,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm5[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6],ymm6[7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm7 = xmm6[0,1],xmm3[0],xmm6[3]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm15 = xmm7[0,1,2,3,4],xmm5[5],xmm7[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm5
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3],xmm5[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm12 = xmm3[0,1,2],xmm2[3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm14[0],xmm11[1],xmm14[1],xmm11[2],xmm14[2],xmm11[3],xmm14[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
+; AVX1-NEXT:    vmovdqa (%r9), %xmm4
+; AVX1-NEXT:    vmovdqa (%r8), %xmm5
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm0[1],ymm4[2,3],ymm0[4],ymm4[5,6],ymm0[7]
+; AVX1-NEXT:    vmovdqa (%rcx), %xmm6
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm7
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,2,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[0,0,1,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm7, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm6[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,2,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm6[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm8, %ymm8
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm8[0],ymm5[1],ymm8[2,3],ymm5[4],ymm8[5,6],ymm5[7]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm7 = xmm5[0,1],xmm1[0],xmm5[3]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3,4],xmm2[5],xmm7[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm5
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2,3],xmm5[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[0,0,1,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm7[3],xmm5[4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm10[0,0,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm3, %ymm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm6, %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm1[2,1,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
-; AVX1-NEXT:    vblendps {{.*#+}} xmm4 = xmm4[0],xmm3[1,2],xmm4[3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[0,2,2,3,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3,4,5,6],xmm6[7]
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpslld $16, %xmm0, %xmm0
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6],ymm2[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm3, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm2 = xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm5 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm5 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm2, %ymm7
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6],ymm7[7]
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm7 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm7 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm7[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm10[2,3,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm9[2,3,2,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm7, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2,3],ymm6[4],ymm2[5,6],ymm6[7]
+; AVX1-NEXT:    vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm6 = mem[2,2,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm5, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm8[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm8, %ymm5, %ymm5
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6],ymm0[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm5, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovdqa %xmm0, 48(%rax)
-; AVX1-NEXT:    vmovdqa %xmm4, 32(%rax)
-; AVX1-NEXT:    vmovdqa %xmm5, 16(%rax)
-; AVX1-NEXT:    vmovdqa %xmm2, (%rax)
-; AVX1-NEXT:    vmovdqa %xmm12, 112(%rax)
-; AVX1-NEXT:    vmovdqa %xmm15, 96(%rax)
-; AVX1-NEXT:    vmovdqa %xmm13, 80(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 64(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 176(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 160(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 144(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 128(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 240(%rax)
-; AVX1-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 224(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 208(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 192(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 304(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 288(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 272(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 256(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 368(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 352(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 336(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 320(%rax)
-; AVX1-NEXT:    addq $184, %rsp
+; AVX1-NEXT:    vmovaps %ymm0, (%rax)
+; AVX1-NEXT:    vmovaps %ymm2, 128(%rax)
+; AVX1-NEXT:    vmovaps %ymm4, 160(%rax)
+; AVX1-NEXT:    vmovaps %ymm3, 64(%rax)
+; AVX1-NEXT:    vmovaps %ymm11, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm15, 288(%rax)
+; AVX1-NEXT:    vmovaps %ymm14, 192(%rax)
+; AVX1-NEXT:    vmovaps %ymm13, 320(%rax)
+; AVX1-NEXT:    vmovaps %ymm12, 352(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 256(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 96(%rax)
+; AVX1-NEXT:    addq $88, %rsp
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: vf32:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    subq $648, %rsp # imm = 0x288
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm9
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm8
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    subq $296, %rsp # imm = 0x128
+; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm10
+; AVX2-SLOW-NEXT:    vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 32(%r9), %xmm9
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm9[1,2,2,3]
+; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm11
+; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %xmm8
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm8[1,2,2,3]
 ; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm0, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm7
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm15
+; AVX2-SLOW-NEXT:    vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm5[0,1,2,1]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm12
+; AVX2-SLOW-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %xmm3
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[2,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm13
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm2
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm14
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm1
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX2-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vmovdqa %xmm9, %xmm11
-; AVX2-SLOW-NEXT:    vmovdqa %xmm9, (%rsp) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[0,1,2,1]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm4[0,1,2,1]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%r9), %xmm2
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm10[1,2,2,3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm11[1,2,2,3]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm15[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,5]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm12[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,5]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6],ymm6[7]
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX2-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,2,3,3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[1,1,1,1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 32(%r9), %ymm4
+; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,2,2,3,5,6,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm0[1,2,2,3,5,6,6,7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[1],ymm4[1],ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[8],ymm4[8],ymm6[9],ymm4[9],ymm6[10],ymm4[10],ymm6[11],ymm4[11]
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm0[2,1,2,3,6,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[2,1,2,3,6,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[8],ymm6[8],ymm7[9],ymm6[9],ymm7[10],ymm6[10],ymm7[11],ymm6[11]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6],ymm6[7]
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm10
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm4
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm15 = ymm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm10[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[8],ymm0[8],ymm15[9],ymm0[9],ymm15[10],ymm0[10],ymm15[11],ymm0[11]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2],ymm6[3,4],ymm0[5],ymm6[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,0,2,2]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,2,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm2, %xmm6
-; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX2-SLOW-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm11[4],xmm3[4],xmm11[5],xmm3[5],xmm11[6],xmm3[6],xmm11[7],xmm3[7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm13, %xmm12
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,2,3,3]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm1
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, %ymm9
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm6
+; AVX2-SLOW-NEXT:    vmovdqa (%r9), %ymm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm5[1,2,2,3,5,6,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[1,2,2,3,5,6,6,7]
 ; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,1,2,3,6,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm13
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm13[2,1,2,3,6,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,1,2,3,6,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm11
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm11[2,1,2,3,6,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm8[0],ymm1[0],ymm8[1],ymm1[1],ymm8[2],ymm1[2],ymm8[3],ymm1[3],ymm8[8],ymm1[8],ymm8[9],ymm1[9],ymm8[10],ymm1[10],ymm8[11],ymm1[11]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%r9), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm12, %ymm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm15[2,1,2,3,6,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[2,1,2,3,6,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[1],ymm1[1],ymm5[2],ymm1[2],ymm5[3],ymm1[3],ymm5[8],ymm1[8],ymm5[9],ymm1[9],ymm5[10],ymm1[10],ymm5[11],ymm1[11]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r9), %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm12, %ymm5, %ymm13, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[1,1,1,1]
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,2,3,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm3, %xmm13
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm4[0],ymm13[1],ymm4[2,3],ymm13[4],ymm4[5,6],ymm13[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,2,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm4[0,1,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm4, %ymm13, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm7 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm7 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,2,3,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm0[2],ymm7[3,4],ymm0[5],ymm7[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm5
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm14[2,3,2,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,2,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm4, %ymm0, %ymm5, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[12],ymm12[12],ymm11[13],ymm12[13],ymm11[14],ymm12[14],ymm11[15],ymm12[15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,3,3,3]
-; AVX2-SLOW-NEXT:    vmovdqa %ymm9, %ymm10
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[1,2,3,3,5,6,7,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm8, %ymm5
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm6[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm4, %ymm0, %ymm5, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm2[4],ymm15[4],ymm2[5],ymm15[5],ymm2[6],ymm15[6],ymm2[7],ymm15[7],ymm2[12],ymm15[12],ymm2[13],ymm15[13],ymm2[14],ymm15[14],ymm2[15],ymm15[15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,3,3,3]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm13 = ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[1,2,3,3,5,6,7,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2],ymm13[3,4],ymm0[5],ymm13[6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm7
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2,3],ymm7[4],ymm0[5,6],ymm7[7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm4, %ymm0, %ymm7, %ymm15
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm4 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm4 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpunpcklwd (%rsp), %xmm0, %xmm7 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm7 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,2]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm7[1],ymm4[2,3],ymm7[4],ymm4[5,6],ymm7[7]
-; AVX2-SLOW-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm7[2],ymm4[3,4],ymm7[5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm7 = mem[0,0,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm4, %ymm7, %ymm4
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm7 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,2]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0],ymm0[1],ymm7[2,3],ymm0[4],ymm7[5,6],ymm0[7]
-; AVX2-SLOW-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm7[2],ymm0[3,4],ymm7[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm14[0,0,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} ymm15 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm15[0],ymm9[0],ymm15[1],ymm9[1],ymm15[2],ymm9[2],ymm15[3],ymm9[3],ymm15[8],ymm9[8],ymm15[9],ymm9[9],ymm15[10],ymm9[10],ymm15[11],ymm9[11]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[1,0,2,2]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7]
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
 ; AVX2-SLOW-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm0, %ymm7, %ymm0
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[8],ymm12[8],ymm11[9],ymm12[9],ymm11[10],ymm12[10],ymm11[11],ymm12[11]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm14 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm11[0],ymm13[0],ymm11[1],ymm13[1],ymm11[2],ymm13[2],ymm11[3],ymm13[3],ymm11[8],ymm13[8],ymm11[9],ymm13[9],ymm11[10],ymm13[10],ymm11[11],ymm13[11]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[1,0,2,2,5,4,6,6]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm14[1],ymm7[2,3],ymm14[4],ymm7[5,6],ymm14[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm8[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm12[2],ymm7[3,4],ymm12[5],ymm7[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm6[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm7, %ymm11, %ymm7
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm6 = ymm6[0],mem[0],ymm6[1],mem[1],ymm6[2],mem[2],ymm6[3],mem[3],ymm6[8],mem[8],ymm6[9],mem[9],ymm6[10],mem[10],ymm6[11],mem[11]
-; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[1,0,2,2,5,4,6,6]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm8[1],ymm6[2,3],ymm8[4],ymm6[5,6],ymm8[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm1[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[1,0,2,2,5,4,6,6]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm9[1],ymm7[2,3],ymm9[4],ymm7[5,6],ymm9[7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm7[0,1],ymm9[2],ymm7[3,4],ymm9[5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu (%rsp), %ymm3 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,1,2,3,6,5,6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm15 = ymm10[4],ymm4[4],ymm10[5],ymm4[5],ymm10[6],ymm4[6],ymm10[7],ymm4[7],ymm10[12],ymm4[12],ymm10[13],ymm4[13],ymm10[14],ymm4[14],ymm10[15],ymm4[15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm15[0],ymm7[1],ymm15[2,3],ymm7[4],ymm15[5,6],ymm7[7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm15 = ymm14[4],ymm12[4],ymm14[5],ymm12[5],ymm14[6],ymm12[6],ymm14[7],ymm12[7],ymm14[12],ymm12[12],ymm14[13],ymm12[13],ymm14[14],ymm12[14],ymm14[15],ymm12[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[3,3,3,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm15[2],ymm7[3,4],ymm15[5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm14[0],ymm12[0],ymm14[1],ymm12[1],ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[8],ymm12[8],ymm14[9],ymm12[9],ymm14[10],ymm12[10],ymm14[11],ymm12[11]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm10[0],ymm4[0],ymm10[1],ymm4[1],ymm10[2],ymm4[2],ymm10[3],ymm4[3],ymm10[8],ymm4[8],ymm10[9],ymm4[9],ymm10[10],ymm4[10],ymm10[11],ymm4[11]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm12[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,0,2,2,5,4,6,6]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0],ymm4[1],ymm10[2,3],ymm4[4],ymm10[5,6],ymm4[7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm10[2],ymm4[3,4],ymm10[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm2[2,1,2,3,6,5,6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm11[4],ymm13[4],ymm11[5],ymm13[5],ymm11[6],ymm13[6],ymm11[7],ymm13[7],ymm11[12],ymm13[12],ymm11[13],ymm13[13],ymm11[14],ymm13[14],ymm11[15],ymm13[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[3,3,3,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 96(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm15, 160(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm7, 288(%rax)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm1, 352(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 160(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 288(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm7, 352(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm9, 96(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm8, (%rax)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, 128(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 192(%rax)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 256(%rax)
+; AVX2-SLOW-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm0, 192(%rax)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, 320(%rax)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX2-SLOW-NEXT:    vmovaps %ymm0, 256(%rax)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-SLOW-NEXT:    addq $648, %rsp # imm = 0x288
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX2-SLOW-NEXT:    addq $296, %rsp # imm = 0x128
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
 ; AVX2-FAST-ALL-LABEL: vf32:
 ; AVX2-FAST-ALL:       # %bb.0:
-; AVX2-FAST-ALL-NEXT:    subq $648, %rsp # imm = 0x288
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %xmm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %xmm11
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rcx), %xmm4
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdx), %xmm6
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %xmm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,1,3,3,4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %xmm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm1, %ymm3, %ymm1
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm8, %xmm0, %xmm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm0, %xmm13
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm8, %xmm11, %xmm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm4, %xmm14
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r8), %xmm3
+; AVX2-FAST-ALL-NEXT:    subq $392, %rsp # imm = 0x188
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm15
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %xmm14
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm12, %xmm14, %xmm0
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm8
+; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %xmm9
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm12, %xmm9, %xmm1
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm1, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,1,3,3,4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm3, %xmm8
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %xmm10
+; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r9), %xmm3
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm2, %xmm3, %xmm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm3, %xmm7
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[1,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %xmm11
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r8), %xmm2
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[1,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
 ; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm0, %ymm1, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm0, %ymm1, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdx), %ymm12
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rcx), %ymm9
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r8), %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r9), %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm1, %ymm2, %ymm1
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm0, %ymm1, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %ymm3
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[1],ymm1[1],ymm5[2],ymm1[2],ymm5[3],ymm1[3],ymm5[8],ymm1[8],ymm5[9],ymm1[9],ymm5[10],ymm1[10],ymm5[11],ymm1[11]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %ymm1
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm4, %ymm0, %ymm4
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm10, %ymm5, %ymm4, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm6[4],xmm14[4],xmm6[5],xmm14[5],xmm6[6],xmm14[6],xmm6[7],xmm14[7]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm6 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %xmm13
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rcx), %xmm1
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm7
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdx), %xmm0
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm5, %ymm5
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm12, %xmm15, %xmm5
+; AVX2-FAST-ALL-NEXT:    vpshufb %xmm12, %xmm8, %xmm4
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm5 = xmm10[1,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm6 = xmm11[1,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm7, %xmm12
+; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm5, %ymm5
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = <1,2,1,2,u,u,3,3>
 ; AVX2-FAST-ALL-NEXT:    vpermd %ymm4, %ymm5, %ymm4
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[1,1,1,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm15[2],ymm4[3,4],ymm15[5],ymm4[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm15 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm15, %xmm8, %xmm13
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm13 = ymm4[0],ymm13[1],ymm4[2,3],ymm13[4],ymm4[5,6],ymm13[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm1, %xmm7, %xmm4
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm4[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm4, %ymm13, %ymm0, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm5, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,1,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
 ; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[1,1,1,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %ymm4
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm10, %ymm4, %ymm4
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm10, %ymm5, %ymm5
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[8],ymm4[8],ymm5[9],ymm4[9],ymm5[10],ymm4[10],ymm5[11],ymm4[11]
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r8), %ymm6
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r9), %ymm5
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[1,2,2,3,5,6,6,7]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm7 = ymm6[1,2,2,3,5,6,6,7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[8],ymm5[8],ymm7[9],ymm5[9],ymm7[10],ymm5[10],ymm7[11],ymm5[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdx), %ymm6
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rcx), %ymm8
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3]
+; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [1,0,2,2,1,0,2,2]
+; AVX2-FAST-ALL-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm12[4],ymm9[4],ymm12[5],ymm9[5],ymm12[6],ymm9[6],ymm12[7],ymm9[7],ymm12[12],ymm9[12],ymm12[13],ymm9[13],ymm12[14],ymm9[14],ymm12[15],ymm9[15]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm1 = [5,6,5,6,5,6,7,7]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm1 = <1,2,1,2,u,u,3,3>
 ; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[12],ymm11[12],ymm10[13],ymm11[13],ymm10[14],ymm11[14],ymm10[15],ymm11[15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm13, %ymm9, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm15, %ymm6, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm4, %ymm0, %ymm5, %ymm0
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm15, %xmm7
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm15
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm10, %ymm15, %ymm0
+; AVX2-FAST-ALL-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %ymm13
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %ymm12
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm3 = ymm12[1,2,2,3,5,6,6,7]
+; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm9 = ymm13[1,2,2,3,5,6,6,7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm9[0],ymm3[0],ymm9[1],ymm3[1],ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[8],ymm3[8],ymm9[9],ymm3[9],ymm9[10],ymm3[10],ymm9[11],ymm3[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm9 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm11
+; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %ymm6
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpsrldq {{.*#+}} ymm14 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm14[0],ymm10[0],ymm14[1],ymm10[1],ymm14[2],ymm10[2],ymm14[3],ymm10[3],ymm14[8],ymm10[8],ymm14[9],ymm10[9],ymm14[10],ymm10[10],ymm14[11],ymm10[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
+; AVX2-FAST-ALL-NEXT:    # xmm9 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm9, %ymm4, %ymm4
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm9[0],ymm4[1],ymm9[2,3],ymm4[4],ymm9[5,6],ymm4[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm7, %ymm7
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm7[2],ymm4[3,4],ymm7[5],ymm4[6,7]
 ; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm11[0],ymm6[0],ymm11[1],ymm6[1],ymm11[2],ymm6[2],ymm11[3],ymm6[3],ymm11[8],ymm6[8],ymm11[9],ymm6[9],ymm11[10],ymm6[10],ymm11[11],ymm6[11]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm9 = [5,4,2,2,5,4,6,6]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm7, %ymm9, %ymm7
+; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm4[0],ymm15[0],ymm4[1],ymm15[1],ymm4[2],ymm15[2],ymm4[3],ymm15[3],ymm4[8],ymm15[8],ymm4[9],ymm15[9],ymm4[10],ymm15[10],ymm4[11],ymm15[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6],ymm7[7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[8],ymm12[8],ymm13[9],ymm12[9],ymm13[10],ymm12[10],ymm13[11],ymm12[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm10 = ymm0[4],ymm8[4],ymm0[5],ymm8[5],ymm0[6],ymm8[6],ymm0[7],ymm8[7],ymm0[12],ymm8[12],ymm0[13],ymm8[13],ymm0[14],ymm8[14],ymm0[15],ymm8[15]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm1 = [5,6,5,6,5,6,7,7]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm10, %ymm1, %ymm10
+; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm14 = ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0],ymm14[1],ymm10[2,3],ymm14[4],ymm10[5,6],ymm14[7]
+; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm14 = ymm5[4],ymm2[4],ymm5[5],ymm2[5],ymm5[6],ymm2[6],ymm5[7],ymm2[7],ymm5[12],ymm2[12],ymm5[13],ymm2[13],ymm5[14],ymm2[14],ymm5[15],ymm2[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[3,3,3,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm14[2],ymm10[3,4],ymm14[5],ymm10[6,7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm0[0],ymm8[0],ymm0[1],ymm8[1],ymm0[2],ymm8[2],ymm0[3],ymm8[3],ymm0[8],ymm8[8],ymm0[9],ymm8[9],ymm0[10],ymm8[10],ymm0[11],ymm8[11]
+; AVX2-FAST-ALL-NEXT:    vpermd %ymm8, %ymm9, %ymm8
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm5[0],ymm2[0],ymm5[1],ymm2[1],ymm5[2],ymm2[2],ymm5[3],ymm2[3],ymm5[8],ymm2[8],ymm5[9],ymm2[9],ymm5[10],ymm2[10],ymm5[11],ymm2[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
+; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm11[4],ymm6[4],ymm11[5],ymm6[5],ymm11[6],ymm6[6],ymm11[7],ymm6[7],ymm11[12],ymm6[12],ymm11[13],ymm6[13],ymm11[14],ymm6[14],ymm11[15],ymm6[15]
+; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm1 = [5,6,5,6,5,6,7,7]
 ; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm13[4],ymm12[4],ymm13[5],ymm12[5],ymm13[6],ymm12[6],ymm13[7],ymm12[7],ymm13[12],ymm12[12],ymm13[13],ymm12[13],ymm13[14],ymm12[14],ymm13[15],ymm12[15]
+; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-FAST-ALL-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm4[4],ymm15[4],ymm4[5],ymm15[5],ymm4[6],ymm15[6],ymm4[7],ymm15[7],ymm4[12],ymm15[12],ymm4[13],ymm15[13],ymm4[14],ymm15[14],ymm4[15],ymm15[15]
 ; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[3,3,3,3]
 ; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm13, %ymm3, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpshufb %ymm15, %ymm2, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd (%rsp), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1],xmm4[2],mem[2],xmm4[3],mem[3]
-; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [1,0,2,2,1,0,2,2]
-; AVX2-FAST-ALL-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm13, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5,6],ymm1[7]
-; AVX2-FAST-ALL-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm4, %ymm4
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm4 = mem[0,0,2,1,4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm4, %ymm4
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm15, %ymm1, %ymm4, %ymm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1],xmm4[2],mem[2],xmm4[3],mem[3]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm4, %ymm13, %ymm4
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm13 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm13 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm13[0],ymm4[1],ymm13[2,3],ymm4[4],ymm13[5,6],ymm4[7]
-; AVX2-FAST-ALL-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm14[0,0,2,1,4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm15, %ymm4, %ymm5, %ymm4
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm5 # 32-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # ymm5 = ymm12[0],mem[0],ymm12[1],mem[1],ymm12[2],mem[2],ymm12[3],mem[3],ymm12[8],mem[8],ymm12[9],mem[9],ymm12[10],mem[10],ymm12[11],mem[11]
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm13 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[8],ymm11[8],ymm10[9],ymm11[9],ymm10[10],ymm11[10],ymm10[11],ymm11[11]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm14 = [5,4,2,2,5,4,6,6]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm5, %ymm14, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0],ymm5[1],ymm13[2,3],ymm5[4],ymm13[5,6],ymm5[7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm9[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm12[2],ymm5[3,4],ymm12[5],ymm5[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm6[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm15, %ymm5, %ymm11, %ymm5
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # ymm6 = ymm6[0],mem[0],ymm6[1],mem[1],ymm6[2],mem[2],ymm6[3],mem[3],ymm6[8],mem[8],ymm6[9],mem[9],ymm6[10],mem[10],ymm6[11],mem[11]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm6, %ymm14, %ymm6
-; AVX2-FAST-ALL-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6],ymm6[7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7]
-; AVX2-FAST-ALL-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm15, %ymm3, %ymm2, %ymm2
 ; AVX2-FAST-ALL-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm2, 96(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, 160(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm5, 288(%rax)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm8, 288(%rax)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm10, 352(%rax)
+; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm7, 96(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 352(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm4, (%rax)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, (%rax)
+; AVX2-FAST-ALL-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 128(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 64(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 128(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm1, 192(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 256(%rax)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 192(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 320(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 256(%rax)
 ; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-FAST-ALL-NEXT:    addq $648, %rsp # imm = 0x288
+; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX2-FAST-ALL-NEXT:    addq $392, %rsp # imm = 0x188
 ; AVX2-FAST-ALL-NEXT:    vzeroupper
 ; AVX2-FAST-ALL-NEXT:    retq
 ;
 ; AVX2-FAST-PERLANE-LABEL: vf32:
 ; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    subq $664, %rsp # imm = 0x298
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %xmm12
+; AVX2-FAST-PERLANE-NEXT:    subq $168, %rsp
+; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rcx), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %xmm13
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm13, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm4, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm11 = ymm0[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r9), %xmm7
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[1,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0],ymm11[1],ymm1[2,3],ymm11[4],ymm1[5,6],ymm11[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0,1],ymm1[2],ymm11[3,4],ymm1[5],ymm11[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, (%rsp) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,1,3,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,2,3,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm9 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm9, %ymm2, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm12, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm5, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm7, %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[1,1,1,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm13
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm12, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm5, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,3,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r9), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm11[1,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm13[1,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %ymm15
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm15, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm7[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,2,3,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm15
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r9), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,0,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %ymm12
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
+; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm9[0],ymm0[1],ymm9[1],ymm0[2],ymm9[2],ymm0[3],ymm9[3],ymm0[8],ymm9[8],ymm0[9],ymm9[9],ymm0[10],ymm9[10],ymm0[11],ymm9[11]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm9, %ymm13
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu (%rsp), %ymm7 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm10[0],ymm7[0],ymm10[1],ymm7[1],ymm10[2],ymm7[2],ymm10[3],ymm7[3],ymm10[8],ymm7[8],ymm10[9],ymm7[9],ymm10[10],ymm7[10],ymm10[11],ymm7[11]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm10, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,2,5,4,6,6]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r9), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm8, %ymm2, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm9, %ymm1, %ymm2, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm11[0],ymm1[0],ymm11[1],ymm1[1],ymm11[2],ymm1[2],ymm11[3],ymm1[3],ymm11[8],ymm1[8],ymm11[9],ymm1[9],ymm11[10],ymm1[10],ymm11[11],ymm1[11]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm15[0],ymm12[0],ymm15[1],ymm12[1],ymm15[2],ymm12[2],ymm15[3],ymm12[3],ymm15[8],ymm12[8],ymm15[9],ymm12[9],ymm15[10],ymm12[10],ymm15[11],ymm12[11]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0],ymm0[1,2],ymm11[3],ymm0[4,5],ymm11[6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm8, %ymm0, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm9, %ymm11, %ymm8, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm12, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[1,1,1,1]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,2,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm1, %xmm11
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm11[1],ymm7[2,3],ymm11[4],ymm7[5,6],ymm11[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm1, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm2, %ymm7, %ymm14, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[1,1,1,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsp), %xmm5 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,2,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm14[2],ymm7[3,4],ymm14[5],ymm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm6, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5,6],ymm8[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm9, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm2, %ymm7, %ymm8, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm12[4],ymm15[4],ymm12[5],ymm15[5],ymm12[6],ymm15[6],ymm12[7],ymm15[7],ymm12[12],ymm15[12],ymm12[13],ymm15[13],ymm12[14],ymm15[14],ymm12[15],ymm15[15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[3,3,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm14[4],ymm5[4],ymm14[5],ymm5[5],ymm14[6],ymm5[6],ymm14[7],ymm5[7],ymm14[12],ymm5[12],ymm14[13],ymm5[13],ymm14[14],ymm5[14],ymm14[15],ymm5[15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,1,2,3,6,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[3,3,3,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm0, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm1, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[8],ymm6[8],ymm8[9],ymm6[9],ymm8[10],ymm6[10],ymm8[11],ymm6[11]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm8 = ymm5[1,2,2,3,5,6,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm10 = ymm14[1,2,2,3,5,6,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[8],ymm8[8],ymm10[9],ymm8[9],ymm10[10],ymm8[10],ymm10[11],ymm8[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm14, %ymm4, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5,6],ymm8[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm8, %ymm11, %ymm15
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm11 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm7, %ymm15, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm15 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm15 = ymm7[4],mem[4],ymm7[5],mem[5],ymm7[6],mem[6],ymm7[7],mem[7],ymm7[12],mem[12],ymm7[13],mem[13],ymm7[14],mem[14],ymm7[15],mem[15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[3,3,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm11 = ymm7[4],mem[4],ymm7[5],mem[5],ymm7[6],mem[6],ymm7[7],mem[7],ymm7[12],mem[12],ymm7[13],mem[13],ymm7[14],mem[14],ymm7[15],mem[15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[1,2,3,3,5,6,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1],ymm15[2],ymm11[3,4],ymm15[5],ymm11[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm14, %ymm7, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0],ymm14[1],ymm11[2,3],ymm14[4],ymm11[5,6],ymm14[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm8, %ymm15, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm11, %ymm8, %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2,3],ymm0[4],ymm8[5,6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm7[2],ymm0[3,4],ymm7[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # xmm7 = mem[0,0,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm0, %ymm7, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3]
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0],ymm0[1],ymm7[2,3],ymm0[4],ymm7[5,6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm7[2],ymm0[3,4],ymm7[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm9[0,0,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm0, %ymm7, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm7 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm7 = ymm12[0],mem[0],ymm12[1],mem[1],ymm12[2],mem[2],ymm12[3],mem[3],ymm12[8],mem[8],ymm12[9],mem[9],ymm12[10],mem[10],ymm12[11],mem[11]
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[1,0,2,2,5,4,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm12[1],ymm7[2,3],ymm12[4],ymm7[5,6],ymm12[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm4[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm12[2],ymm7[3,4],ymm12[5],ymm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm9 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm7, %ymm9, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm5 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm3 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm5[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,0,2,2,5,4,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm15[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0],ymm6[1],ymm8[2,3],ymm6[4],ymm8[5,6],ymm6[7]
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[8],ymm8[8],ymm10[9],ymm8[9],ymm10[10],ymm8[10],ymm10[11],ymm8[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2],ymm6[3,4],ymm8[5],ymm6[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,2,5,4,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm14[0],ymm5[0],ymm14[1],ymm5[1],ymm14[2],ymm5[2],ymm14[3],ymm5[3],ymm14[8],ymm5[8],ymm14[9],ymm5[9],ymm14[10],ymm5[10],ymm14[11],ymm5[11]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm15[4],ymm12[4],ymm15[5],ymm12[5],ymm15[6],ymm12[6],ymm15[7],ymm12[7],ymm15[12],ymm12[12],ymm15[13],ymm12[13],ymm15[14],ymm12[14],ymm15[15],ymm12[15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,1,2,3,6,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm9[4],ymm7[4],ymm9[5],ymm7[5],ymm9[6],ymm7[6],ymm9[7],ymm7[7],ymm9[12],ymm7[12],ymm9[13],ymm7[13],ymm9[14],ymm7[14],ymm9[15],ymm7[15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm4[4],ymm13[4],ymm4[5],ymm13[5],ymm4[6],ymm13[6],ymm4[7],ymm13[7],ymm4[12],ymm13[12],ymm4[13],ymm13[13],ymm4[14],ymm13[14],ymm4[15],ymm13[15]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm13, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm3 = ymm12[1,2,2,3,5,6,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm4 = ymm15[1,2,2,3,5,6,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm7[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 96(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm11, 160(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 128(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm7, 288(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 352(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 320(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, (%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, 128(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 160(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 288(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm6, 320(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm11, 352(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 96(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, (%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 64(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 224(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm14, 192(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 32(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 192(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 256(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-FAST-PERLANE-NEXT:    addq $664, %rsp # imm = 0x298
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX2-FAST-PERLANE-NEXT:    addq $168, %rsp
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
 ; AVX512-LABEL: vf32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm2
-; AVX512-NEXT:    vmovdqu64 (%rsi), %zmm3
-; AVX512-NEXT:    vmovdqu64 (%rdx), %zmm4
-; AVX512-NEXT:    vmovdqu64 (%rcx), %zmm5
-; AVX512-NEXT:    vmovdqu64 (%r8), %zmm1
-; AVX512-NEXT:    vmovdqu64 (%r9), %zmm0
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,32,3,35,0,0,1,33,4,36,0,0,2,34,0,0,0,32,3,35,0,0,1,33,4,36,0,0,2,34]
-; AVX512-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm5, %zmm4, %zmm6
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,32,3,35,0,0,1,33,4,36,0,0,2,34,5,37,0,32,3,35,0,0,1,33,4,36,0,0,2,34,5,37]
+; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm3
+; AVX512-NEXT:    vmovdqu64 (%rsi), %zmm4
+; AVX512-NEXT:    vmovdqu64 (%rdx), %zmm1
+; AVX512-NEXT:    vmovdqu64 (%rcx), %zmm2
+; AVX512-NEXT:    vmovdqu64 (%r8), %zmm5
+; AVX512-NEXT:    vmovdqu64 (%r9), %zmm6
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,32,3,35,0,0,1,33,4,36,0,0,2,34,0,0,0,32,3,35,0,0,1,33,4,36,0,0,2,34]
 ; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm3, %zmm2, %zmm7
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm7
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,32,3,35,0,0,1,33,4,36,0,0,2,34,5,37,0,32,3,35,0,0,1,33,4,36,0,0,2,34,5,37]
+; AVX512-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm4, %zmm3, %zmm0
 ; AVX512-NEXT:    movw $9362, %cx # imm = 0x2492
-; AVX512-NEXT:    kmovd %ecx, %k2
-; AVX512-NEXT:    vmovdqa32 %zmm6, %zmm7 {%k2}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,1,2,3,32,u,6,7,8,9,33,u,12,13,14,15,34,u,18,19,20,21,35,u,24,25,26,27,36,u,30,31>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm7, %zmm8
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,32,6,7,8,9,10,33,12,13,14,15,16,34,18,19,20,21,22,35,24,25,26,27,28,36,30,31]
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm8, %zmm6
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [8,40,0,0,6,38,9,41,0,0,7,39,10,42,0,0,8,40,0,0,6,38,9,41,0,0,7,39,10,42,0,0]
+; AVX512-NEXT:    kmovd %ecx, %k1
+; AVX512-NEXT:    vmovdqa32 %zmm7, %zmm0 {%k1}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [2,34,0,0,0,32,3,35,0,0,1,33,4,36,0,0,2,34,0,0,0,32,3,35,0,0,1,33,4,36,0,0]
 ; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm3, %zmm2, %zmm7
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [5,37,8,40,0,0,6,38,9,41,0,0,7,39,10,42,5,37,8,40,0,0,6,38,9,41,0,0,7,39,10,42]
-; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm5, %zmm4, %zmm8
+; AVX512-NEXT:    vpermi2w %zmm6, %zmm5, %zmm7
 ; AVX512-NEXT:    movw $18724, %cx # imm = 0x4924
-; AVX512-NEXT:    kmovd %ecx, %k1
-; AVX512-NEXT:    vmovdqa32 %zmm7, %zmm8 {%k1}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,37,u,4,5,6,7,38,u,10,11,12,13,39,u,16,17,18,19,40,u,22,23,24,25,41,u,28,29,30,31>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm8, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,2,37,4,5,6,7,8,38,10,11,12,13,14,39,16,17,18,19,20,40,22,23,24,25,26,41,28,29,30,31]
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm7, %zmm8
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [13,45,0,0,11,43,14,46,0,0,12,44,15,47,0,0,13,45,0,0,11,43,14,46,0,0,12,44,15,47,0,0]
+; AVX512-NEXT:    kmovd %ecx, %k2
+; AVX512-NEXT:    vmovdqa32 %zmm7, %zmm0 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,5,37,8,40,0,0,6,38,9,41,0,0,7,39,0,0,5,37,8,40,0,0,6,38,9,41,0,0,7,39]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm6, %zmm5, %zmm8
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [5,37,8,40,0,0,6,38,9,41,0,0,7,39,10,42,5,37,8,40,0,0,6,38,9,41,0,0,7,39,10,42]
 ; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm5, %zmm4, %zmm7
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,11,43,14,46,0,0,12,44,15,47,0,0,13,45,0,0,11,43,14,46,0,0,12,44,15,47,0,0,13,45]
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm7
+; AVX512-NEXT:    vmovdqa32 %zmm8, %zmm7 {%k1}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [8,40,0,0,6,38,9,41,0,0,7,39,10,42,0,0,8,40,0,0,6,38,9,41,0,0,7,39,10,42,0,0]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512-NEXT:    vmovdqa32 %zmm8, %zmm7 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,11,43,14,46,0,0,12,44,15,47,0,0,13,45,0,0,11,43,14,46,0,0,12,44,15,47,0,0,13,45]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [10,42,13,45,0,0,11,43,14,46,0,0,12,44,15,47,10,42,13,45,0,0,11,43,14,46,0,0,12,44,15,47]
 ; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm3, %zmm2, %zmm9
-; AVX512-NEXT:    vmovdqa32 %zmm7, %zmm9 {%k1}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <42,u,2,3,4,5,43,u,8,9,10,11,44,u,14,15,16,17,45,u,20,21,22,23,46,u,26,27,28,29,47,u>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,42,2,3,4,5,6,43,8,9,10,11,12,44,14,15,16,17,18,45,20,21,22,23,24,46,26,27,28,29,30,47]
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm7, %zmm9
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,16,48,19,51,0,0,17,49,20,52,0,0,18,50,0,0,16,48,19,51,0,0,17,49,20,52,0,0,18,50]
-; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm5, %zmm4, %zmm7
+; AVX512-NEXT:    vpermi2w %zmm6, %zmm5, %zmm9
+; AVX512-NEXT:    vmovdqa32 %zmm8, %zmm9 {%k1}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [13,45,0,0,11,43,14,46,0,0,12,44,15,47,0,0,13,45,0,0,11,43,14,46,0,0,12,44,15,47,0,0]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm8
+; AVX512-NEXT:    vmovdqa32 %zmm8, %zmm9 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,16,48,19,51,0,0,17,49,20,52,0,0,18,50,0,0,16,48,19,51,0,0,17,49,20,52,0,0,18,50]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm8
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [16,48,19,51,0,0,17,49,20,52,0,0,18,50,21,53,16,48,19,51,0,0,17,49,20,52,0,0,18,50,21,53]
 ; AVX512-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm3, %zmm2, %zmm10
-; AVX512-NEXT:    vmovdqa32 %zmm7, %zmm10 {%k2}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,48,u,6,7,8,9,49,u,12,13,14,15,50,u,18,19,20,21,51,u,24,25,26,27,52,u,30,31>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm10, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,48,6,7,8,9,10,49,12,13,14,15,16,50,18,19,20,21,22,51,24,25,26,27,28,52,30,31]
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm7, %zmm10
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [24,56,0,0,22,54,25,57,0,0,23,55,26,58,0,0,24,56,0,0,22,54,25,57,0,0,23,55,26,58,0,0]
-; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm3, %zmm2, %zmm7
+; AVX512-NEXT:    vpermi2w %zmm4, %zmm3, %zmm10
+; AVX512-NEXT:    vmovdqa32 %zmm8, %zmm10 {%k1}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [18,50,0,0,16,48,19,51,0,0,17,49,20,52,0,0,18,50,0,0,16,48,19,51,0,0,17,49,20,52,0,0]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm6, %zmm5, %zmm8
+; AVX512-NEXT:    vmovdqa32 %zmm8, %zmm10 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,21,53,24,56,0,0,22,54,25,57,0,0,23,55,0,0,21,53,24,56,0,0,22,54,25,57,0,0,23,55]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm6, %zmm5, %zmm8
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [21,53,24,56,0,0,22,54,25,57,0,0,23,55,26,58,21,53,24,56,0,0,22,54,25,57,0,0,23,55,26,58]
 ; AVX512-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm5, %zmm4, %zmm11
-; AVX512-NEXT:    vmovdqa32 %zmm7, %zmm11 {%k1}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,53,u,4,5,6,7,54,u,10,11,12,13,55,u,16,17,18,19,56,u,22,23,24,25,57,u,28,29,30,31>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm11, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,1,2,53,4,5,6,7,8,54,10,11,12,13,14,55,16,17,18,19,20,56,22,23,24,25,26,57,28,29,30,31]
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm7, %zmm11
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [29,61,0,0,27,59,30,62,0,0,28,60,31,63,0,0,29,61,0,0,27,59,30,62,0,0,28,60,31,63,0,0]
-; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm5, %zmm4, %zmm7
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,0,27,59,30,62,0,0,28,60,31,63,0,0,29,61,0,0,27,59,30,62,0,0,28,60,31,63,0,0,29,61]
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm11
+; AVX512-NEXT:    vmovdqa32 %zmm8, %zmm11 {%k1}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [24,56,0,0,22,54,25,57,0,0,23,55,26,58,0,0,24,56,0,0,22,54,25,57,0,0,23,55,26,58,0,0]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512-NEXT:    vmovdqa32 %zmm8, %zmm11 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,27,59,30,62,0,0,28,60,31,63,0,0,29,61,0,0,27,59,30,62,0,0,28,60,31,63,0,0,29,61]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm4, %zmm3, %zmm8
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [26,58,29,61,0,0,27,59,30,62,0,0,28,60,31,63,26,58,29,61,0,0,27,59,30,62,0,0,28,60,31,63]
+; AVX512-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2w %zmm6, %zmm5, %zmm3
+; AVX512-NEXT:    vmovdqa32 %zmm8, %zmm3 {%k1}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [29,61,0,0,27,59,30,62,0,0,28,60,31,63,0,0,29,61,0,0,27,59,30,62,0,0,28,60,31,63,0,0]
 ; AVX512-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2w %zmm3, %zmm2, %zmm4
-; AVX512-NEXT:    vmovdqa32 %zmm7, %zmm4 {%k1}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <58,u,2,3,4,5,59,u,8,9,10,11,60,u,14,15,16,17,61,u,20,21,22,23,62,u,26,27,28,29,63,u>
-; AVX512-NEXT:    vpermi2w %zmm1, %zmm4, %zmm2
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,58,2,3,4,5,6,59,8,9,10,11,12,60,14,15,16,17,18,61,20,21,22,23,24,62,26,27,28,29,30,63]
-; AVX512-NEXT:    vpermi2w %zmm0, %zmm2, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, 320(%rax)
+; AVX512-NEXT:    vpermi2w %zmm2, %zmm1, %zmm4
+; AVX512-NEXT:    vmovdqa32 %zmm4, %zmm3 {%k2}
+; AVX512-NEXT:    vmovdqu64 %zmm3, 320(%rax)
 ; AVX512-NEXT:    vmovdqu64 %zmm11, 256(%rax)
 ; AVX512-NEXT:    vmovdqu64 %zmm10, 192(%rax)
 ; AVX512-NEXT:    vmovdqu64 %zmm9, 128(%rax)
-; AVX512-NEXT:    vmovdqu64 %zmm8, 64(%rax)
-; AVX512-NEXT:    vmovdqu64 %zmm6, (%rax)
+; AVX512-NEXT:    vmovdqu64 %zmm7, 64(%rax)
+; AVX512-NEXT:    vmovdqu64 %zmm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <32 x i16>, <32 x i16>* %in.vecptr0, align 32

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
index 40cfa16b3df95..0c05b3e104be2 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
@@ -90,21 +90,22 @@ define void @store_i32_stride3_vf4(<4 x i32>* %in.vecptr0, <4 x i32>* %in.vecptr
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps (%rsi), %xmm1
 ; SSE-NEXT:    movaps (%rdx), %xmm2
-; SSE-NEXT:    movaps %xmm0, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm2[0,3]
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[1,3]
 ; SSE-NEXT:    movaps %xmm0, %xmm4
 ; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,0]
-; SSE-NEXT:    movaps %xmm0, %xmm3
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,2],xmm2[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE-NEXT:    movaps %xmm1, 16(%rcx)
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2]
+; SSE-NEXT:    movaps %xmm1, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3],xmm2[3,3]
+; SSE-NEXT:    movaps %xmm2, %xmm5
+; SSE-NEXT:    unpckhps {{.*#+}} xmm5 = xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,2]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm2, 16(%rcx)
+; SSE-NEXT:    movaps %xmm5, 32(%rcx)
 ; SSE-NEXT:    movaps %xmm4, (%rcx)
-; SSE-NEXT:    movaps %xmm0, 32(%rcx)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: store_i32_stride3_vf4:
@@ -206,73 +207,74 @@ define void @store_i32_stride3_vf4(<4 x i32>* %in.vecptr0, <4 x i32>* %in.vecptr
 define void @store_i32_stride3_vf8(<8 x i32>* %in.vecptr0, <8 x i32>* %in.vecptr1, <8 x i32>* %in.vecptr2, <24 x i32>* %out.vec) nounwind {
 ; SSE-LABEL: store_i32_stride3_vf8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movaps (%rdi), %xmm6
-; SSE-NEXT:    movaps 16(%rdi), %xmm1
-; SSE-NEXT:    movaps (%rsi), %xmm3
+; SSE-NEXT:    movaps (%rdi), %xmm1
+; SSE-NEXT:    movaps 16(%rdi), %xmm0
+; SSE-NEXT:    movaps (%rsi), %xmm8
 ; SSE-NEXT:    movaps 16(%rsi), %xmm5
-; SSE-NEXT:    movaps (%rdx), %xmm8
-; SSE-NEXT:    movaps 16(%rdx), %xmm9
-; SSE-NEXT:    movaps %xmm1, %xmm0
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm5[1]
-; SSE-NEXT:    movaps %xmm1, %xmm7
-; SSE-NEXT:    movaps %xmm1, %xmm2
-; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm5[3,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm9[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,2],xmm0[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1],xmm9[0,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,0]
-; SSE-NEXT:    movaps %xmm6, %xmm4
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
-; SSE-NEXT:    movaps %xmm6, %xmm7
-; SSE-NEXT:    movaps %xmm6, %xmm0
-; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,3],xmm3[3,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm8[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1],xmm8[0,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,0]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,2],xmm9[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0,1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,2],xmm8[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0,1,3]
-; SSE-NEXT:    movaps %xmm0, (%rcx)
-; SSE-NEXT:    movaps %xmm3, 16(%rcx)
-; SSE-NEXT:    movaps %xmm2, 48(%rcx)
-; SSE-NEXT:    movaps %xmm5, 64(%rcx)
+; SSE-NEXT:    movaps (%rdx), %xmm4
+; SSE-NEXT:    movaps 16(%rdx), %xmm6
+; SSE-NEXT:    movaps %xmm5, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,3],xmm6[3,3]
+; SSE-NEXT:    movaps %xmm6, %xmm2
+; SSE-NEXT:    unpckhps {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm7[0,2]
+; SSE-NEXT:    movaps %xmm0, %xmm7
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm5[1]
+; SSE-NEXT:    movaps %xmm6, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0],xmm5[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0],xmm7[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[1,3]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0,2]
+; SSE-NEXT:    movaps %xmm8, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[3,3],xmm4[3,3]
+; SSE-NEXT:    movaps %xmm4, %xmm6
+; SSE-NEXT:    unpckhps {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,3],xmm5[0,2]
+; SSE-NEXT:    movaps %xmm1, %xmm5
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm8[1]
+; SSE-NEXT:    movaps %xmm4, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,0],xmm8[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm1[1,3]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0,2]
+; SSE-NEXT:    movaps %xmm1, (%rcx)
+; SSE-NEXT:    movaps %xmm7, 16(%rcx)
 ; SSE-NEXT:    movaps %xmm6, 32(%rcx)
-; SSE-NEXT:    movaps %xmm1, 80(%rcx)
+; SSE-NEXT:    movaps %xmm0, 48(%rcx)
+; SSE-NEXT:    movaps %xmm3, 64(%rcx)
+; SSE-NEXT:    movaps %xmm2, 80(%rcx)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: store_i32_stride3_vf8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd (%rdx), %ymm0
-; AVX1-NEXT:    vmovaps (%rsi), %xmm1
+; AVX1-NEXT:    vmovaps (%rsi), %xmm0
+; AVX1-NEXT:    vmovaps (%rdi), %xmm1
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastsd (%rdx), %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vmovaps 16(%rdx), %xmm1
 ; AVX1-NEXT:    vmovaps 16(%rsi), %xmm2
-; AVX1-NEXT:    vmovaps (%rdi), %xmm3
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm4
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm2[3,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm1[1,1],xmm4[0,2]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT:    vbroadcastsd (%rdx), %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,0],xmm1[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[2,1],xmm3[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[1,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT:    vbroadcastsd 24(%rdi), %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
 ; AVX1-NEXT:    vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
-; AVX1-NEXT:    vmovaps %ymm0, 32(%rcx)
-; AVX1-NEXT:    vmovaps %ymm1, (%rcx)
-; AVX1-NEXT:    vmovaps %ymm2, 64(%rcx)
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX1-NEXT:    vmovaps %ymm2, 32(%rcx)
+; AVX1-NEXT:    vmovaps %ymm1, 64(%rcx)
+; AVX1-NEXT:    vmovaps %ymm0, (%rcx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -281,26 +283,26 @@ define void @store_i32_stride3_vf8(<8 x i32>* %in.vecptr0, <8 x i32>* %in.vecptr
 ; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovaps (%rsi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovaps (%rdx), %ymm2
-; AVX2-SLOW-NEXT:    vbroadcastsd 24(%rdi), %ymm3
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm4 = ymm1[1,2,3,3,5,6,7,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd (%rdx), %ymm4
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm4 = mem[1,0,2,2]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vbroadcastsd (%rdx), %ymm5
+; AVX2-SLOW-NEXT:    vbroadcastsd 24(%rdi), %ymm5
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, 32(%rcx)
-; AVX2-SLOW-NEXT:    vmovaps %ymm4, (%rcx)
-; AVX2-SLOW-NEXT:    vmovaps %ymm3, 64(%rcx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm4, 64(%rcx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm3, (%rcx)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
@@ -309,27 +311,27 @@ define void @store_i32_stride3_vf8(<8 x i32>* %in.vecptr0, <8 x i32>* %in.vecptr
 ; AVX2-FAST-ALL-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-FAST-ALL-NEXT:    vmovaps (%rsi), %ymm1
 ; AVX2-FAST-ALL-NEXT:    vmovaps (%rdx), %ymm2
-; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm3 = ymm1[0,0,3,3,4,4,7,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm4 = ymm0[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = [1,0,2,2,1,0,2,2]
+; AVX2-FAST-ALL-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm1, %ymm3, %ymm3
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd (%rdx), %ymm4
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
 ; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm4 = ymm2[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2,3],ymm0[4],ymm4[5,6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm4 = ymm1[0,0,3,3,4,4,7,7]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
 ; AVX2-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm4 = <5,u,u,6,u,u,7,u>
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm1, %ymm4, %ymm4
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 24(%rdi), %ymm5
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5,6],ymm2[7]
-; AVX2-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = [1,0,2,2,1,0,2,2]
-; AVX2-FAST-ALL-NEXT:    # ymm4 = mem[0,1,0,1]
 ; AVX2-FAST-ALL-NEXT:    vpermps %ymm1, %ymm4, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd (%rdx), %ymm1
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, (%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm2, 64(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm3, 32(%rcx)
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 24(%rdi), %ymm2
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm1, 64(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 32(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm3, (%rcx)
 ; AVX2-FAST-ALL-NEXT:    vzeroupper
 ; AVX2-FAST-ALL-NEXT:    retq
 ;
@@ -338,26 +340,26 @@ define void @store_i32_stride3_vf8(<8 x i32>* %in.vecptr0, <8 x i32>* %in.vecptr
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rsi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdx), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 24(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm4 = ymm1[1,2,3,3,5,6,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd (%rdx), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm4 = mem[1,0,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd (%rdx), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 24(%rdi), %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 32(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm4, (%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm3, 64(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm4, 64(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm3, (%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
@@ -391,191 +393,189 @@ define void @store_i32_stride3_vf8(<8 x i32>* %in.vecptr0, <8 x i32>* %in.vecptr
 define void @store_i32_stride3_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vecptr1, <16 x i32>* %in.vecptr2, <48 x i32>* %out.vec) nounwind {
 ; SSE-LABEL: store_i32_stride3_vf16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movaps (%rdi), %xmm8
+; SSE-NEXT:    movaps (%rdi), %xmm3
 ; SSE-NEXT:    movaps 16(%rdi), %xmm2
-; SSE-NEXT:    movaps 32(%rdi), %xmm5
+; SSE-NEXT:    movaps 32(%rdi), %xmm1
 ; SSE-NEXT:    movaps 48(%rdi), %xmm15
-; SSE-NEXT:    movaps (%rsi), %xmm11
-; SSE-NEXT:    movaps 16(%rsi), %xmm12
+; SSE-NEXT:    movaps (%rsi), %xmm8
+; SSE-NEXT:    movaps 16(%rsi), %xmm10
 ; SSE-NEXT:    movaps 32(%rsi), %xmm14
-; SSE-NEXT:    movaps 48(%rsi), %xmm4
-; SSE-NEXT:    movaps 16(%rdx), %xmm10
-; SSE-NEXT:    movaps 32(%rdx), %xmm3
-; SSE-NEXT:    movaps 48(%rdx), %xmm1
-; SSE-NEXT:    movaps %xmm15, %xmm13
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm4[1]
-; SSE-NEXT:    movaps %xmm15, %xmm6
-; SSE-NEXT:    movaps %xmm15, %xmm9
-; SSE-NEXT:    unpcklps {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[3,3],xmm4[3,3]
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm13[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,1],xmm1[0,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,1],xmm6[2,0]
-; SSE-NEXT:    movaps %xmm5, %xmm6
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm14[1]
-; SSE-NEXT:    movaps %xmm5, %xmm1
+; SSE-NEXT:    movaps 48(%rsi), %xmm0
+; SSE-NEXT:    movaps (%rdx), %xmm12
+; SSE-NEXT:    movaps 16(%rdx), %xmm4
+; SSE-NEXT:    movaps 32(%rdx), %xmm5
+; SSE-NEXT:    movaps 48(%rdx), %xmm6
+; SSE-NEXT:    movaps %xmm0, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,3],xmm6[3,3]
+; SSE-NEXT:    movaps %xmm6, %xmm9
+; SSE-NEXT:    unpckhps {{.*#+}} xmm9 = xmm9[2],xmm15[2],xmm9[3],xmm15[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,3],xmm7[0,2]
+; SSE-NEXT:    movaps %xmm15, %xmm7
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm0[1]
+; SSE-NEXT:    movaps %xmm6, %xmm11
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[1,0],xmm0[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[2,0],xmm7[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,1],xmm15[1,3]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[0,1],xmm6[0,2]
+; SSE-NEXT:    movaps %xmm14, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm5[3,3]
 ; SSE-NEXT:    movaps %xmm5, %xmm13
-; SSE-NEXT:    unpcklps {{.*#+}} xmm13 = xmm13[0],xmm14[0],xmm13[1],xmm14[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[3,3],xmm14[3,3]
-; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[1,1],xmm3[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,2],xmm6[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[0,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[0,1],xmm1[2,0]
-; SSE-NEXT:    movaps %xmm2, %xmm1
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm12[1]
-; SSE-NEXT:    movaps %xmm2, %xmm6
-; SSE-NEXT:    movaps %xmm2, %xmm7
-; SSE-NEXT:    unpcklps {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3],xmm12[3,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[1,1],xmm10[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[0,2],xmm1[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,1],xmm10[0,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,0]
-; SSE-NEXT:    movaps %xmm8, %xmm1
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm11[1]
-; SSE-NEXT:    movaps %xmm8, %xmm6
-; SSE-NEXT:    unpcklps {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
-; SSE-NEXT:    movaps %xmm8, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[3,3],xmm11[3,3]
-; SSE-NEXT:    movaps (%rdx), %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[1,1],xmm0[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,2],xmm1[0,2]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[0,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,1],xmm3[2,0]
-; SSE-NEXT:    shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm15 = xmm15[1,2],mem[2,3]
-; SSE-NEXT:    shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm5 = xmm5[1,2],mem[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,2],xmm10[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,2],xmm0[2,3]
-; SSE-NEXT:    movaps %xmm6, (%rcx)
-; SSE-NEXT:    movaps %xmm11, 16(%rcx)
-; SSE-NEXT:    movaps %xmm7, 48(%rcx)
-; SSE-NEXT:    movaps %xmm12, 64(%rcx)
-; SSE-NEXT:    movaps %xmm13, 96(%rcx)
-; SSE-NEXT:    movaps %xmm14, 112(%rcx)
-; SSE-NEXT:    movaps %xmm9, 144(%rcx)
-; SSE-NEXT:    movaps %xmm4, 160(%rcx)
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[2,0,1,3]
-; SSE-NEXT:    movaps %xmm8, 32(%rcx)
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0,1,3]
-; SSE-NEXT:    movaps %xmm2, 80(%rcx)
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0,1,3]
-; SSE-NEXT:    movaps %xmm5, 128(%rcx)
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[2,0,1,3]
-; SSE-NEXT:    movaps %xmm15, 176(%rcx)
+; SSE-NEXT:    unpckhps {{.*#+}} xmm13 = xmm13[2],xmm1[2],xmm13[3],xmm1[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm14[1]
+; SSE-NEXT:    movaps %xmm5, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,0],xmm14[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm0[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,1],xmm1[1,3]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0,2]
+; SSE-NEXT:    movaps %xmm10, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm4[3,3]
+; SSE-NEXT:    movaps %xmm4, %xmm14
+; SSE-NEXT:    unpckhps {{.*#+}} xmm14 = xmm14[2],xmm2[2],xmm14[3],xmm2[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm10[1]
+; SSE-NEXT:    movaps %xmm4, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,0],xmm10[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm2[1,3]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0,2]
+; SSE-NEXT:    movaps %xmm8, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm12[3,3]
+; SSE-NEXT:    movaps %xmm12, %xmm4
+; SSE-NEXT:    unpckhps {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm8[1]
+; SSE-NEXT:    movaps %xmm12, %xmm6
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,0],xmm8[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0],xmm0[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[0,1],xmm3[1,3]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm12[0,2]
+; SSE-NEXT:    movaps %xmm3, (%rcx)
+; SSE-NEXT:    movaps %xmm6, 16(%rcx)
+; SSE-NEXT:    movaps %xmm4, 32(%rcx)
+; SSE-NEXT:    movaps %xmm2, 48(%rcx)
+; SSE-NEXT:    movaps %xmm5, 64(%rcx)
+; SSE-NEXT:    movaps %xmm14, 80(%rcx)
+; SSE-NEXT:    movaps %xmm1, 96(%rcx)
+; SSE-NEXT:    movaps %xmm7, 112(%rcx)
+; SSE-NEXT:    movaps %xmm13, 128(%rcx)
+; SSE-NEXT:    movaps %xmm15, 144(%rcx)
+; SSE-NEXT:    movaps %xmm11, 160(%rcx)
+; SSE-NEXT:    movaps %xmm9, 176(%rcx)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: store_i32_stride3_vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovaps (%rdx), %ymm8
-; AVX1-NEXT:    vmovapd 32(%rdx), %ymm9
-; AVX1-NEXT:    vmovaps (%rsi), %xmm2
-; AVX1-NEXT:    vmovaps 16(%rsi), %xmm3
-; AVX1-NEXT:    vmovapd 32(%rsi), %xmm4
-; AVX1-NEXT:    vmovaps 48(%rsi), %xmm5
-; AVX1-NEXT:    vmovaps (%rdi), %xmm6
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm7
-; AVX1-NEXT:    vmovapd 32(%rdi), %xmm0
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm2[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,1],xmm1[0,2]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vbroadcastsd (%rdx), %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX1-NEXT:    vmovaps 48(%rdi), %xmm1
-; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm1[3,3],xmm5[3,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm5[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm5[1,1],xmm1[0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm9[2,3,2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm5 = ymm5[0,0,3,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3],ymm5[4],ymm1[5,6],ymm5[7]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm0[1],xmm4[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm4[1,1],xmm5[0,2]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm4[0],xmm0[0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm4[2,0],xmm0[2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX1-NEXT:    vbroadcastsd 32(%rdx), %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm7[3,3],xmm3[3,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm7[1],xmm3[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm5[0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm8[2,3,2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm4 = mem[0,0,3,3,4,4,7,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm5 = mem[1,0,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm5 = ymm9[1,1,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
+; AVX1-NEXT:    vmovaps (%rsi), %xmm0
+; AVX1-NEXT:    vmovaps (%rdi), %xmm1
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm2
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm1[1],xmm0[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm0[1,1],xmm3[0,2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastsd (%rdx), %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vmovaps 48(%rdx), %xmm1
+; AVX1-NEXT:    vmovaps 48(%rsi), %xmm3
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm3[3,0],xmm1[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm1[2,1],xmm4[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm3[1,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT:    vbroadcastsd 56(%rdi), %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
+; AVX1-NEXT:    vmovaps 32(%rsi), %xmm3
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm2[1],xmm3[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm3[1,1],xmm4[0,2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm3[2,0],xmm2[2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vbroadcastsd 32(%rdx), %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX1-NEXT:    vmovaps 16(%rdx), %xmm3
+; AVX1-NEXT:    vmovaps 16(%rsi), %xmm4
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm4[3,0],xmm3[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm3[2,1],xmm5[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,0],xmm4[1,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm3, %ymm3
+; AVX1-NEXT:    vbroadcastsd 24(%rdi), %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm5 = mem[1,1,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
 ; AVX1-NEXT:    vpermilps {{.*#+}} ymm5 = mem[0,0,3,3,4,4,7,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm6 = mem[1,0,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm6 = ymm8[1,1,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm5 = mem[1,0,2,2]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm6 = mem[1,1,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6],ymm5[7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm6 = mem[0,0,3,3,4,4,7,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7]
 ; AVX1-NEXT:    vmovaps %ymm5, 32(%rcx)
 ; AVX1-NEXT:    vmovaps %ymm4, 128(%rcx)
 ; AVX1-NEXT:    vmovaps %ymm3, 64(%rcx)
-; AVX1-NEXT:    vmovaps %ymm0, 96(%rcx)
+; AVX1-NEXT:    vmovaps %ymm2, 96(%rcx)
 ; AVX1-NEXT:    vmovaps %ymm1, 160(%rcx)
-; AVX1-NEXT:    vmovaps %ymm2, (%rcx)
+; AVX1-NEXT:    vmovaps %ymm0, (%rcx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: store_i32_stride3_vf16:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm4
-; AVX2-SLOW-NEXT:    vmovaps (%rsi), %ymm2
+; AVX2-SLOW-NEXT:    vmovaps (%rsi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rsi), %ymm5
-; AVX2-SLOW-NEXT:    vmovaps (%rdx), %ymm1
+; AVX2-SLOW-NEXT:    vmovaps (%rdx), %ymm3
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rdx), %ymm6
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm7 = ymm0[0,0,2,1]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0],ymm3[1],ymm7[2,3],ymm3[4],ymm7[5,6],ymm3[7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm2 = mem[1,0,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm7 = ymm1[0,0,2,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6],ymm2[7]
 ; AVX2-SLOW-NEXT:    vbroadcastsd (%rdx), %ymm7
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vbroadcastsd 56(%rdi), %ymm7
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm7 = ymm6[2,1,3,3]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm8 = ymm5[1,2,3,3,5,6,7,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm8 = ymm6[2,1,3,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5,6],ymm8[7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd 56(%rdi), %ymm8
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm8 = mem[1,0,2,2]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,1]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm9 = ymm4[0,0,2,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
 ; AVX2-SLOW-NEXT:    vbroadcastsd 32(%rdx), %ymm9
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
-; AVX2-SLOW-NEXT:    vbroadcastsd 24(%rdi), %ymm9
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm10 = ymm2[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm9 = ymm3[2,1,3,3]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm10 = ymm0[1,2,3,3,5,6,7,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm10 = ymm1[2,1,3,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5,6],ymm9[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd 24(%rdi), %ymm10
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm5 = ymm6[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,0,3,3,4,4,7,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[1,1,2,2]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,3,3,4,4,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, 32(%rcx)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm4, 128(%rcx)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm9, 64(%rcx)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm8, 96(%rcx)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm7, 160(%rcx)
-; AVX2-SLOW-NEXT:    vmovaps %ymm3, (%rcx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm2, (%rcx)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
@@ -594,89 +594,89 @@ define void @store_i32_stride3_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vec
 ; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
 ; AVX2-FAST-ALL-NEXT:    vbroadcastsd (%rdx), %ymm8
 ; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm8 = ymm3[0,0,3,3,4,4,7,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm9 = ymm1[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm9 = ymm5[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1,2],ymm9[3],ymm8[4,5],ymm9[6],ymm8[7]
-; AVX2-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm9 = [5,6,5,6,5,6,7,7]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm3, %ymm9, %ymm10
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 56(%rdi), %ymm11
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm5 = ymm10[0],ymm5[1],ymm10[2,3],ymm5[4],ymm10[5,6],ymm5[7]
+; AVX2-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm8 = [5,6,5,6,5,6,7,7]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm3, %ymm8, %ymm9
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm10 = ymm5[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 56(%rdi), %ymm10
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm10 = ymm1[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm10[1],ymm5[2,3],ymm10[4],ymm5[5,6],ymm10[7]
+; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm10 = ymm3[0,0,3,3,4,4,7,7]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2],ymm5[3,4],ymm10[5],ymm5[6,7]
 ; AVX2-FAST-ALL-NEXT:    vpermps %ymm3, %ymm6, %ymm3
 ; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
 ; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
 ; AVX2-FAST-ALL-NEXT:    vbroadcastsd 32(%rdx), %ymm3
 ; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm2, %ymm9, %ymm3
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 24(%rdi), %ymm6
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm6 = ymm4[2,1,3,3]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5,6],ymm6[7]
-; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,0,3,3,4,4,7,7]
 ; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm2 = ymm4[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm3 = ymm4[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm3 = ymm2[0,0,3,3,4,4,7,7]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm2, %ymm8, %ymm2
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm3 = ymm4[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 24(%rdi), %ymm3
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm2, 64(%rcx)
 ; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 32(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm3, 64(%rcx)
 ; AVX2-FAST-ALL-NEXT:    vmovaps %ymm1, 96(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm5, 160(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm8, 128(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm5, 128(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm9, 160(%rcx)
 ; AVX2-FAST-ALL-NEXT:    vmovaps %ymm7, (%rcx)
 ; AVX2-FAST-ALL-NEXT:    vzeroupper
 ; AVX2-FAST-ALL-NEXT:    retq
 ;
 ; AVX2-FAST-PERLANE-LABEL: store_i32_stride3_vf16:
 ; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rsi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rsi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rsi), %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdx), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdx), %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdx), %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm7 = ymm0[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0],ymm3[1],ymm7[2,3],ymm3[4],ymm7[5,6],ymm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm2 = mem[1,0,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm7 = ymm1[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6],ymm2[7]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd (%rdx), %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 56(%rdi), %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm7 = ymm6[2,1,3,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm8 = ymm5[1,2,3,3,5,6,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm8 = ymm6[2,1,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5,6],ymm8[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 56(%rdi), %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm8 = mem[1,0,2,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm9 = ymm4[0,0,2,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 32(%rdx), %ymm9
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 24(%rdi), %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm10 = ymm2[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm9 = ymm3[2,1,3,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm10 = ymm0[1,2,3,3,5,6,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm10 = ymm1[2,1,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5,6],ymm9[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 24(%rdi), %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm5 = ymm6[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,0,3,3,4,4,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,3,3,4,4,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 32(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm4, 128(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm9, 64(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm8, 96(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, 160(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm3, (%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm2, (%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
@@ -689,14 +689,14 @@ define void @store_i32_stride3_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vec
 ; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm3
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15]
 ; AVX512-NEXT:    vpermi2d %zmm2, %zmm3, %zmm4
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <5,u,22,6,u,23,7,u,24,8,u,25,9,u,26,10>
-; AVX512-NEXT:    vpermi2d %zmm0, %zmm1, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,21,2,3,22,5,6,23,8,9,24,11,12,25,14,15]
-; AVX512-NEXT:    vpermi2d %zmm2, %zmm3, %zmm5
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,11,27,u,12,28,u,13,29,u,14,30,u,15,31,u>
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [26,1,2,27,4,5,28,7,8,29,10,11,30,13,14,31]
-; AVX512-NEXT:    vpermi2d %zmm2, %zmm3, %zmm0
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <5,21,u,6,22,u,7,23,u,8,24,u,9,25,u,10>
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,22,3,4,23,6,7,24,9,10,25,12,13,26,15]
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm3, %zmm5
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <10,27,u,11,28,u,12,29,u,13,30,u,14,31,u,15>
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm2, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,27,3,4,28,6,7,29,9,10,30,12,13,31,15]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm3, %zmm0
 ; AVX512-NEXT:    vmovdqu64 %zmm0, 128(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm5, 64(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm4, (%rcx)
@@ -719,309 +719,290 @@ define void @store_i32_stride3_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vec
 define void @store_i32_stride3_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vecptr1, <32 x i32>* %in.vecptr2, <96 x i32>* %out.vec) nounwind {
 ; SSE-LABEL: store_i32_stride3_vf32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    subq $168, %rsp
-; SSE-NEXT:    movaps (%rdi), %xmm1
+; SSE-NEXT:    subq $56, %rsp
+; SSE-NEXT:    movaps 64(%rdi), %xmm12
+; SSE-NEXT:    movaps (%rdi), %xmm2
 ; SSE-NEXT:    movaps 16(%rdi), %xmm10
 ; SSE-NEXT:    movaps 32(%rdi), %xmm9
 ; SSE-NEXT:    movaps 48(%rdi), %xmm8
-; SSE-NEXT:    movaps (%rsi), %xmm4
-; SSE-NEXT:    movaps 16(%rsi), %xmm5
-; SSE-NEXT:    movaps 32(%rsi), %xmm14
-; SSE-NEXT:    movaps 48(%rsi), %xmm13
-; SSE-NEXT:    movaps (%rdx), %xmm6
-; SSE-NEXT:    movaps 16(%rdx), %xmm7
-; SSE-NEXT:    movaps 32(%rdx), %xmm3
-; SSE-NEXT:    movaps 48(%rdx), %xmm2
+; SSE-NEXT:    movaps (%rsi), %xmm7
+; SSE-NEXT:    movaps 16(%rsi), %xmm3
+; SSE-NEXT:    movaps 32(%rsi), %xmm5
+; SSE-NEXT:    movaps 48(%rsi), %xmm6
+; SSE-NEXT:    movaps (%rdx), %xmm1
+; SSE-NEXT:    movaps 16(%rdx), %xmm11
+; SSE-NEXT:    movaps 32(%rdx), %xmm13
+; SSE-NEXT:    movaps 48(%rdx), %xmm14
 ; SSE-NEXT:    movaps %xmm1, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm6[0,3]
-; SSE-NEXT:    movaps %xmm6, %xmm11
-; SSE-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm1, %xmm6
-; SSE-NEXT:    unpcklps {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[2,0]
-; SSE-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm1, %xmm6
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm4[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm4[3,3]
-; SSE-NEXT:    movaps %xmm1, %xmm15
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm11[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm6[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[1,3]
+; SSE-NEXT:    movaps %xmm2, %xmm4
+; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0,2]
 ; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm10, %xmm0
-; SSE-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm7[0,3]
-; SSE-NEXT:    movaps %xmm10, %xmm1
-; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm10, %xmm0
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm5[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[3,3],xmm5[3,3]
-; SSE-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm7[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,2],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm9, %xmm0
-; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm3[0,3]
-; SSE-NEXT:    movaps %xmm9, %xmm4
-; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[2,0]
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
+; SSE-NEXT:    movaps %xmm1, %xmm4
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,0],xmm7[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm0[0,2]
 ; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,3],xmm1[3,3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm7[0,2]
+; SSE-NEXT:    movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm11, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm10[1,3]
+; SSE-NEXT:    movaps %xmm10, %xmm2
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm10, %xmm0
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
+; SSE-NEXT:    movaps %xmm11, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3],xmm11[3,3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm11 = xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,3],xmm3[0,2]
+; SSE-NEXT:    movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm13, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm9[1,3]
+; SSE-NEXT:    movaps %xmm9, %xmm2
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-NEXT:    movaps %xmm9, %xmm0
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm14[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[3,3],xmm14[3,3]
-; SSE-NEXT:    movaps %xmm9, (%rsp) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[1,1],xmm3[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,2],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm8, %xmm0
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; SSE-NEXT:    movaps %xmm13, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,0],xmm5[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2]
 ; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[0,3]
-; SSE-NEXT:    movaps %xmm8, %xmm1
-; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm8, %xmm0
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm13[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[3,3],xmm13[3,3]
-; SSE-NEXT:    movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[1,1],xmm2[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[0,2],xmm0[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[3,3],xmm13[3,3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm13 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[0,3],xmm5[0,2]
 ; SSE-NEXT:    movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 64(%rdi), %xmm13
-; SSE-NEXT:    movaps 64(%rdx), %xmm1
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm13, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,3]
-; SSE-NEXT:    movaps 64(%rsi), %xmm11
-; SSE-NEXT:    movaps %xmm13, %xmm12
-; SSE-NEXT:    unpcklps {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[0,1],xmm0[2,0]
-; SSE-NEXT:    movaps %xmm13, %xmm0
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm11[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[3,3],xmm11[3,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[1,1],xmm1[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,2],xmm0[0,2]
-; SSE-NEXT:    movaps 80(%rdi), %xmm2
-; SSE-NEXT:    movaps 80(%rdx), %xmm3
-; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm2, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[0,3]
-; SSE-NEXT:    movaps 80(%rsi), %xmm8
-; SSE-NEXT:    movaps %xmm2, %xmm10
-; SSE-NEXT:    unpcklps {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,1],xmm1[2,0]
-; SSE-NEXT:    movaps %xmm2, %xmm1
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm8[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3],xmm8[3,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,1],xmm3[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[0,2],xmm1[0,2]
-; SSE-NEXT:    movaps 96(%rdi), %xmm0
-; SSE-NEXT:    movaps 96(%rdx), %xmm4
-; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm0, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm4[0,3]
-; SSE-NEXT:    movaps 96(%rsi), %xmm6
-; SSE-NEXT:    movaps %xmm0, %xmm7
-; SSE-NEXT:    unpcklps {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[0,1],xmm3[2,0]
-; SSE-NEXT:    movaps %xmm0, %xmm3
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm6[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm6[3,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,1],xmm4[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,2],xmm3[0,2]
-; SSE-NEXT:    movaps 112(%rdi), %xmm3
-; SSE-NEXT:    movaps 112(%rdx), %xmm9
-; SSE-NEXT:    movaps %xmm3, %xmm5
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm9[0,3]
-; SSE-NEXT:    movaps 112(%rsi), %xmm1
-; SSE-NEXT:    movaps %xmm3, %xmm4
-; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,0]
-; SSE-NEXT:    movaps %xmm3, %xmm5
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3],xmm1[3,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm9[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
-; SSE-NEXT:    movaps %xmm15, %xmm5
-; SSE-NEXT:    shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm5 = xmm5[1,2],mem[2,3]
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT:    shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm15 = xmm15[1,2],mem[2,3]
-; SSE-NEXT:    movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps (%rsp), %xmm15 # 16-byte Reload
-; SSE-NEXT:    shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm15 = xmm15[1,2],mem[2,3]
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT:    shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm14 = xmm14[1,2],mem[2,3]
+; SSE-NEXT:    movaps %xmm14, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm8[1,3]
+; SSE-NEXT:    movaps %xmm8, %xmm2
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm8, %xmm0
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1]
+; SSE-NEXT:    movaps %xmm14, %xmm15
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[1,0],xmm6[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[2,0],xmm0[0,2]
+; SSE-NEXT:    movaps 64(%rsi), %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,3],xmm14[3,3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm14 = xmm14[2],xmm8[2],xmm14[3],xmm8[3]
+; SSE-NEXT:    movaps 64(%rdx), %xmm10
+; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,3],xmm6[0,2]
 ; SSE-NEXT:    movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm13 = xmm13[1,2],mem[2,3]
-; SSE-NEXT:    shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm2 = xmm2[1,2],mem[2,3]
-; SSE-NEXT:    shufps $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm0 = xmm0[1,2],mem[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,2],xmm9[2,3]
+; SSE-NEXT:    movaps %xmm10, %xmm1
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm12[1,3]
+; SSE-NEXT:    movaps %xmm12, %xmm14
+; SSE-NEXT:    unpcklps {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,1],xmm1[0,2]
+; SSE-NEXT:    movaps %xmm12, %xmm1
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE-NEXT:    movaps %xmm10, %xmm13
+; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[1,0],xmm0[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[2,0],xmm1[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm10[3,3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps 80(%rdi), %xmm3
+; SSE-NEXT:    movaps 80(%rdx), %xmm12
+; SSE-NEXT:    movaps %xmm12, %xmm1
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[1,3]
+; SSE-NEXT:    movaps 80(%rsi), %xmm0
+; SSE-NEXT:    movaps %xmm3, %xmm11
+; SSE-NEXT:    unpcklps {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,1],xmm1[0,2]
+; SSE-NEXT:    movaps %xmm3, %xmm1
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE-NEXT:    movaps %xmm12, %xmm9
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[1,0],xmm0[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[2,0],xmm1[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm12[3,3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm12 = xmm12[2],xmm3[2],xmm12[3],xmm3[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps 96(%rdi), %xmm3
+; SSE-NEXT:    movaps 96(%rdx), %xmm6
+; SSE-NEXT:    movaps %xmm6, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[1,3]
+; SSE-NEXT:    movaps 96(%rsi), %xmm0
+; SSE-NEXT:    movaps %xmm3, %xmm8
+; SSE-NEXT:    unpcklps {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[0,1],xmm2[0,2]
+; SSE-NEXT:    movaps %xmm3, %xmm2
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-NEXT:    movaps %xmm6, %xmm7
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,0],xmm0[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm2[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm6[3,3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm6 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps 112(%rdi), %xmm5
+; SSE-NEXT:    movaps 112(%rdx), %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm5[1,3]
+; SSE-NEXT:    movaps 112(%rsi), %xmm0
+; SSE-NEXT:    movaps %xmm5, %xmm4
+; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2]
+; SSE-NEXT:    movaps %xmm5, %xmm3
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
+; SSE-NEXT:    movaps %xmm2, %xmm1
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm2[3,3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
+; SSE-NEXT:    movaps %xmm2, 368(%rcx)
 ; SSE-NEXT:    movaps %xmm1, 352(%rcx)
 ; SSE-NEXT:    movaps %xmm4, 336(%rcx)
-; SSE-NEXT:    movaps %xmm6, 304(%rcx)
-; SSE-NEXT:    movaps %xmm7, 288(%rcx)
-; SSE-NEXT:    movaps %xmm8, 256(%rcx)
-; SSE-NEXT:    movaps %xmm10, 240(%rcx)
-; SSE-NEXT:    movaps %xmm11, 208(%rcx)
-; SSE-NEXT:    movaps %xmm12, 192(%rcx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, 160(%rcx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, 144(%rcx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, 112(%rcx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, 96(%rcx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, 64(%rcx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, 48(%rcx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, 16(%rcx)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm1, (%rcx)
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0,1,3]
-; SSE-NEXT:    movaps %xmm3, 368(%rcx)
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE-NEXT:    movaps %xmm0, 320(%rcx)
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0,1,3]
-; SSE-NEXT:    movaps %xmm2, 272(%rcx)
-; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[2,0,1,3]
-; SSE-NEXT:    movaps %xmm13, 224(%rcx)
+; SSE-NEXT:    movaps %xmm6, 320(%rcx)
+; SSE-NEXT:    movaps %xmm7, 304(%rcx)
+; SSE-NEXT:    movaps %xmm8, 288(%rcx)
+; SSE-NEXT:    movaps %xmm12, 272(%rcx)
+; SSE-NEXT:    movaps %xmm9, 256(%rcx)
+; SSE-NEXT:    movaps %xmm11, 240(%rcx)
+; SSE-NEXT:    movaps %xmm10, 224(%rcx)
+; SSE-NEXT:    movaps %xmm13, 208(%rcx)
+; SSE-NEXT:    movaps %xmm14, 192(%rcx)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
 ; SSE-NEXT:    movaps %xmm0, 176(%rcx)
-; SSE-NEXT:    movaps %xmm15, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[1,3]
+; SSE-NEXT:    movaps %xmm15, 160(%rcx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 144(%rcx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 128(%rcx)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
+; SSE-NEXT:    movaps %xmm0, 112(%rcx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 96(%rcx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 80(%rcx)
-; SSE-NEXT:    movaps %xmm5, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm5[1,3]
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 64(%rcx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 48(%rcx)
+; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 32(%rcx)
-; SSE-NEXT:    addq $168, %rsp
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 16(%rcx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, (%rcx)
+; SSE-NEXT:    addq $56, %rsp
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: store_i32_stride3_vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovaps (%rdx), %ymm14
-; AVX1-NEXT:    vmovapd 32(%rdx), %ymm8
-; AVX1-NEXT:    vmovapd 64(%rdx), %ymm5
-; AVX1-NEXT:    vmovapd 96(%rdx), %ymm15
-; AVX1-NEXT:    vmovaps (%rsi), %xmm2
-; AVX1-NEXT:    vmovaps 16(%rsi), %xmm7
-; AVX1-NEXT:    vmovaps 32(%rsi), %xmm10
-; AVX1-NEXT:    vmovapd 48(%rsi), %xmm0
-; AVX1-NEXT:    vmovaps (%rdi), %xmm4
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm1
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm11
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm4[1],xmm2[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[1,1],xmm3[0,2]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vbroadcastsd (%rdx), %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
-; AVX1-NEXT:    vmovaps 80(%rsi), %xmm3
-; AVX1-NEXT:    vmovaps 80(%rdi), %xmm4
-; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm4[3,3],xmm3[3,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm4[0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm5[2,3,2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX1-NEXT:    vmovaps 64(%rsi), %xmm3
+; AVX1-NEXT:    vmovaps (%rsi), %xmm0
+; AVX1-NEXT:    vmovaps (%rdi), %xmm1
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm5
 ; AVX1-NEXT:    vmovaps 64(%rdi), %xmm4
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm4[1],xmm3[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm3[1,1],xmm6[0,2]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
-; AVX1-NEXT:    vbroadcastsd 64(%rdx), %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX1-NEXT:    vmovaps 48(%rdi), %xmm3
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm3[3,3],xmm0[3,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm3[0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm8[2,3,2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6],ymm3[7]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm11[1],xmm10[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm10[1,1],xmm0[0,2]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm10[0],xmm11[0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm11[2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vbroadcastsd 32(%rdx), %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
-; AVX1-NEXT:    vmovaps 112(%rsi), %xmm0
-; AVX1-NEXT:    vmovaps 112(%rdi), %xmm3
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm3[3,3],xmm0[3,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm3[0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm15[2,3,2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6],ymm3[7]
-; AVX1-NEXT:    vmovaps 96(%rsi), %xmm0
-; AVX1-NEXT:    vmovaps 96(%rdi), %xmm3
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm0[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm0[1,1],xmm4[0,2]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[2,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT:    vbroadcastsd 96(%rdx), %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[3,3],xmm7[3,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm7[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm7[1,1],xmm1[0,2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastsd (%rdx), %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vmovaps 80(%rdx), %xmm1
+; AVX1-NEXT:    vmovaps 80(%rsi), %xmm2
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,0],xmm1[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[2,1],xmm3[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[1,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm14[2,3,2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = ymm5[1,1,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm4 = mem[0,0,3,3,4,4,7,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm5 = mem[1,0,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm5 = ymm8[1,1,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm5 = mem[0,0,3,3,4,4,7,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm6 = mem[1,0,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm6 = ymm15[1,1,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm6 = mem[0,0,3,3,4,4,7,7]
+; AVX1-NEXT:    vbroadcastsd 88(%rdi), %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX1-NEXT:    vmovaps 48(%rdx), %xmm2
+; AVX1-NEXT:    vmovaps 48(%rsi), %xmm3
+; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm3[3,0],xmm2[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm2[2,1],xmm6[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[1,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm2, %ymm2
+; AVX1-NEXT:    vbroadcastsd 56(%rdi), %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX1-NEXT:    vmovaps 112(%rdx), %xmm3
+; AVX1-NEXT:    vmovaps 112(%rsi), %xmm6
+; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm6[3,0],xmm3[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm3[2,1],xmm7[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,0],xmm6[1,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm6[2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm3, %ymm3
+; AVX1-NEXT:    vbroadcastsd 120(%rdi), %ymm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
+; AVX1-NEXT:    vmovaps 64(%rsi), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm4[1],xmm6[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm6[1,1],xmm7[0,2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm4[0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm6[2,0],xmm4[2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm4, %ymm4
+; AVX1-NEXT:    vbroadcastsd 64(%rdx), %ymm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
+; AVX1-NEXT:    vmovaps 32(%rsi), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm5[1],xmm6[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm6[1,1],xmm7[0,2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm5[0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm6[2,0],xmm5[2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
+; AVX1-NEXT:    vbroadcastsd 32(%rdx), %ymm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7]
+; AVX1-NEXT:    vmovaps 96(%rsi), %xmm6
+; AVX1-NEXT:    vmovaps 96(%rdi), %xmm7
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm7[1],xmm6[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm6[1,1],xmm0[0,2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm6[2,0],xmm7[2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm6, %ymm0
+; AVX1-NEXT:    vbroadcastsd 96(%rdx), %ymm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7]
+; AVX1-NEXT:    vmovaps 16(%rdx), %xmm0
+; AVX1-NEXT:    vmovaps 16(%rsi), %xmm7
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm7[3,0],xmm0[3,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[2,1],xmm1[0,2]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,0],xmm7[1,0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm7[2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastsd 24(%rdi), %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,2]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm7 = mem[1,1,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3],ymm1[4],ymm7[5,6],ymm1[7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm7 = mem[0,0,3,3,4,4,7,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm7[2],ymm1[3,4],ymm7[5],ymm1[6,7]
 ; AVX1-NEXT:    vpermilpd {{.*#+}} ymm7 = mem[1,0,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm7 = ymm14[1,1,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1,2],ymm7[3],ymm6[4,5],ymm7[6],ymm6[7]
-; AVX1-NEXT:    vmovaps %ymm6, 32(%rcx)
-; AVX1-NEXT:    vmovaps %ymm5, 320(%rcx)
-; AVX1-NEXT:    vmovaps %ymm4, 128(%rcx)
-; AVX1-NEXT:    vmovaps %ymm3, 224(%rcx)
-; AVX1-NEXT:    vmovaps %ymm1, 64(%rcx)
-; AVX1-NEXT:    vmovaps %ymm0, 288(%rcx)
-; AVX1-NEXT:    vmovaps %ymm11, 352(%rcx)
-; AVX1-NEXT:    vmovaps %ymm10, 96(%rcx)
-; AVX1-NEXT:    vmovaps %ymm9, 160(%rcx)
-; AVX1-NEXT:    vmovaps %ymm13, 192(%rcx)
-; AVX1-NEXT:    vmovaps %ymm12, 256(%rcx)
-; AVX1-NEXT:    vmovaps %ymm2, (%rcx)
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm10 = mem[1,1,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6],ymm7[7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm10 = mem[0,0,3,3,4,4,7,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm10 = mem[1,0,2,2]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm11 = mem[1,1,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2,3],ymm10[4],ymm11[5,6],ymm10[7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm11 = mem[0,0,3,3,4,4,7,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm11 = mem[1,0,2,2]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm12 = mem[1,1,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1],ymm12[2,3],ymm11[4],ymm12[5,6],ymm11[7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7]
+; AVX1-NEXT:    vmovaps %ymm11, 32(%rcx)
+; AVX1-NEXT:    vmovaps %ymm10, 320(%rcx)
+; AVX1-NEXT:    vmovaps %ymm7, 128(%rcx)
+; AVX1-NEXT:    vmovaps %ymm1, 224(%rcx)
+; AVX1-NEXT:    vmovaps %ymm0, 64(%rcx)
+; AVX1-NEXT:    vmovaps %ymm6, 288(%rcx)
+; AVX1-NEXT:    vmovaps %ymm5, 96(%rcx)
+; AVX1-NEXT:    vmovaps %ymm4, 192(%rcx)
+; AVX1-NEXT:    vmovaps %ymm3, 352(%rcx)
+; AVX1-NEXT:    vmovaps %ymm2, 160(%rcx)
+; AVX1-NEXT:    vmovaps %ymm9, 256(%rcx)
+; AVX1-NEXT:    vmovaps %ymm8, (%rcx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -1030,103 +1011,103 @@ define void @store_i32_stride3_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
 ; AVX2-SLOW-NEXT:    subq $40, %rsp
 ; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm3
-; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm7
-; AVX2-SLOW-NEXT:    vmovaps 32(%rsi), %ymm6
-; AVX2-SLOW-NEXT:    vmovaps 64(%rsi), %ymm10
-; AVX2-SLOW-NEXT:    vmovaps 96(%rsi), %ymm4
-; AVX2-SLOW-NEXT:    vmovaps 32(%rdx), %ymm8
-; AVX2-SLOW-NEXT:    vmovaps 64(%rdx), %ymm11
-; AVX2-SLOW-NEXT:    vmovaps 96(%rdx), %ymm5
+; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm4
+; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm8
+; AVX2-SLOW-NEXT:    vmovaps 32(%rsi), %ymm5
+; AVX2-SLOW-NEXT:    vmovaps 64(%rsi), %ymm9
+; AVX2-SLOW-NEXT:    vmovaps 96(%rsi), %ymm2
+; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 32(%rdx), %ymm10
+; AVX2-SLOW-NEXT:    vmovaps 64(%rdx), %ymm12
+; AVX2-SLOW-NEXT:    vmovaps 96(%rdx), %ymm6
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = mem[1,0,2,2]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm2 = ymm0[0,0,2,1]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX2-SLOW-NEXT:    vbroadcastsd (%rdx), %ymm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm3 = ymm0[0,0,2,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd (%rdx), %ymm3
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vbroadcastsd 88(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm9 = ymm10[1,2,3,3,5,6,7,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[2,2,2,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1],ymm2[2],ymm9[3,4],ymm2[5],ymm9[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm9 = ymm11[2,1,3,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm9[1],ymm2[2,3],ymm9[4],ymm2[5,6],ymm9[7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm3 = ymm12[2,1,3,3]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm7 = ymm9[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0],ymm3[1],ymm7[2,3],ymm3[4],ymm7[5,6],ymm3[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd 88(%rdi), %ymm7
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm9 = mem[1,0,2,2]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm12 = ymm7[0,0,2,1]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0],ymm9[1],ymm12[2,3],ymm9[4],ymm12[5,6],ymm9[7]
-; AVX2-SLOW-NEXT:    vbroadcastsd 64(%rdx), %ymm12
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm7 = ymm10[2,1,3,3]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm11 = ymm5[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2,3],ymm7[4],ymm11[5,6],ymm7[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd 56(%rdi), %ymm11
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm11[2],ymm7[3,4],ymm11[5],ymm7[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vbroadcastsd 56(%rdi), %ymm12
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm13 = ymm6[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm11 = ymm6[2,1,3,3]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm13 = ymm2[1,2,3,3,5,6,7,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm13 = ymm8[2,1,3,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0],ymm13[1],ymm12[2,3],ymm13[4],ymm12[5,6],ymm13[7]
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm11 = ymm13[0],ymm11[1],ymm13[2,3],ymm11[4],ymm13[5,6],ymm11[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd 120(%rdi), %ymm13
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm13[2],ymm11[3,4],ymm13[5],ymm11[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm13 = mem[1,0,2,2]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm13 = ymm13[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm14 = ymm3[0,0,2,1]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm14 = ymm8[0,0,2,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1],ymm14[2,3],ymm13[4],ymm14[5,6],ymm13[7]
-; AVX2-SLOW-NEXT:    vbroadcastsd 32(%rdx), %ymm14
+; AVX2-SLOW-NEXT:    vbroadcastsd 64(%rdx), %ymm14
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7]
-; AVX2-SLOW-NEXT:    vbroadcastsd 120(%rdi), %ymm14
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm15 = ymm4[1,2,3,3,5,6,7,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2],ymm15[3,4],ymm14[5],ymm15[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm15 = ymm5[2,1,3,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3],ymm15[4],ymm14[5,6],ymm15[7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm14 = mem[1,0,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm14 = ymm14[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm15 = ymm4[0,0,2,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2,3],ymm14[4],ymm15[5,6],ymm14[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd 32(%rdx), %ymm15
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm15 = mem[1,0,2,2]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm15 = ymm15[0,1,0,1]
-; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %ymm0
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm0[0,0,2,1]
+; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %ymm2
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm2[0,0,2,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3],ymm15[4],ymm1[5,6],ymm15[7]
 ; AVX2-SLOW-NEXT:    vbroadcastsd 96(%rdx), %ymm15
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3,4],ymm15[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps (%rsi), %ymm15
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[1,2,3,3,5,6,7,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-SLOW-NEXT:    vbroadcastsd 24(%rdi), %ymm9
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm9[2],ymm2[3,4],ymm9[5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vmovaps (%rdx), %ymm9
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm12 = ymm9[2,1,3,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5,6],ymm12[7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm10 = ymm10[0,0,3,3,4,4,7,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm10 = ymm11[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm10[0],ymm7[1,2],ymm10[3],ymm7[4,5],ymm10[6],ymm7[7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[0,0,3,3,4,4,7,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm6 = ymm8[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0],ymm3[1,2],ymm6[3],ymm3[4,5],ymm6[6],ymm3[7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm4 = ymm4[0,0,3,3,4,4,7,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm5[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm4 = ymm15[0,0,3,3,4,4,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm3 = ymm15[1,2,3,3,5,6,7,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-SLOW-NEXT:    vmovaps (%rdx), %ymm0
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm7 = ymm0[2,1,3,3]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2,3],ymm7[4],ymm3[5,6],ymm7[7]
+; AVX2-SLOW-NEXT:    vbroadcastsd 24(%rdi), %ymm7
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm7 = ymm8[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm8 = ymm12[1,1,2,2]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm8 = ymm9[0,0,3,3,4,4,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm8 = ymm10[1,1,2,2]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2,3],ymm4[4],ymm8[5,6],ymm4[7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm5 = ymm6[1,1,2,2]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6],ymm2[7]
+; AVX2-SLOW-NEXT:    vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm5 = mem[0,0,3,3,4,4,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm5 = mem[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm5 = ymm9[1,1,2,2]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX2-SLOW-NEXT:    vmovaps %ymm4, 32(%rcx)
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 320(%rcx)
-; AVX2-SLOW-NEXT:    vmovaps %ymm3, 128(%rcx)
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm15[0,0,3,3,4,4,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vmovaps %ymm0, 32(%rcx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm2, 320(%rcx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm4, 128(%rcx)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm7, 224(%rcx)
-; AVX2-SLOW-NEXT:    vmovaps %ymm2, 64(%rcx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm3, 64(%rcx)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm1, 288(%rcx)
-; AVX2-SLOW-NEXT:    vmovaps %ymm14, 352(%rcx)
-; AVX2-SLOW-NEXT:    vmovaps %ymm13, 96(%rcx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm14, 96(%rcx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm13, 192(%rcx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm11, 352(%rcx)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, 160(%rcx)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 192(%rcx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, 256(%rcx)
 ; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, (%rcx)
@@ -1137,92 +1118,94 @@ define void @store_i32_stride3_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
 ; AVX2-FAST-ALL-LABEL: store_i32_stride3_vf32:
 ; AVX2-FAST-ALL:       # %bb.0:
 ; AVX2-FAST-ALL-NEXT:    vmovaps (%rdi), %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovaps 32(%rdi), %ymm10
-; AVX2-FAST-ALL-NEXT:    vmovaps 64(%rdi), %ymm7
-; AVX2-FAST-ALL-NEXT:    vmovaps 96(%rdi), %ymm4
-; AVX2-FAST-ALL-NEXT:    vmovaps (%rsi), %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovaps 32(%rsi), %ymm12
+; AVX2-FAST-ALL-NEXT:    vmovaps 32(%rdi), %ymm5
+; AVX2-FAST-ALL-NEXT:    vmovaps 64(%rdi), %ymm9
+; AVX2-FAST-ALL-NEXT:    vmovaps (%rsi), %ymm1
+; AVX2-FAST-ALL-NEXT:    vmovaps 32(%rsi), %ymm7
 ; AVX2-FAST-ALL-NEXT:    vmovaps 64(%rsi), %ymm11
 ; AVX2-FAST-ALL-NEXT:    vmovaps 96(%rsi), %ymm6
-; AVX2-FAST-ALL-NEXT:    vmovaps 32(%rdx), %ymm13
-; AVX2-FAST-ALL-NEXT:    vmovaps 64(%rdx), %ymm14
-; AVX2-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = [1,0,2,2,1,0,2,2]
-; AVX2-FAST-ALL-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm2, %ymm8, %ymm1
+; AVX2-FAST-ALL-NEXT:    vmovaps 32(%rdx), %ymm12
+; AVX2-FAST-ALL-NEXT:    vmovaps 64(%rdx), %ymm4
+; AVX2-FAST-ALL-NEXT:    vmovaps 96(%rdx), %ymm15
+; AVX2-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [1,0,2,2,1,0,2,2]
+; AVX2-FAST-ALL-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm1, %ymm10, %ymm2
 ; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm3 = ymm0[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
 ; AVX2-FAST-ALL-NEXT:    vbroadcastsd (%rdx), %ymm3
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm9 = [5,6,5,6,5,6,7,7]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm11, %ymm9, %ymm3
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 88(%rdi), %ymm5
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm5[2],ymm3[3,4],ymm5[5],ymm3[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm5 = ymm14[2,1,3,3]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6],ymm5[7]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm11, %ymm8, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm15 = ymm7[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm5 = ymm15[0],ymm5[1],ymm15[2,3],ymm5[4],ymm15[5,6],ymm5[7]
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 64(%rdx), %ymm15
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm15[2],ymm5[3,4],ymm15[5],ymm5[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm11 = ymm11[0,0,3,3,4,4,7,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm11[2],ymm7[3,4],ymm11[5],ymm7[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm11 = ymm14[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm7 = ymm11[0],ymm7[1,2],ymm11[3],ymm7[4,5],ymm11[6],ymm7[7]
-; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm11 = ymm12[0,0,3,3,4,4,7,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm14 = ymm10[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm11 = ymm14[0,1],ymm11[2],ymm14[3,4],ymm11[5],ymm14[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm14 = ymm13[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm11 = ymm14[0],ymm11[1,2],ymm14[3],ymm11[4,5],ymm14[6],ymm11[7]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm12, %ymm9, %ymm14
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 56(%rdi), %ymm15
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovaps 96(%rdx), %ymm15
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm13 = ymm13[2,1,3,3]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1],ymm14[2,3],ymm13[4],ymm14[5,6],ymm13[7]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm12, %ymm8, %ymm12
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0],ymm12[1],ymm10[2,3],ymm12[4],ymm10[5,6],ymm12[7]
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 32(%rdx), %ymm12
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2],ymm10[3,4],ymm12[5],ymm10[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm12 = ymm6[0,0,3,3,4,4,7,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm14 = ymm4[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm12 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm14 = ymm15[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm12 = ymm14[0],ymm12[1,2],ymm14[3],ymm12[4,5],ymm14[6],ymm12[7]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm6, %ymm9, %ymm14
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 120(%rdi), %ymm1
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1],ymm1[2],ymm14[3,4],ymm1[5],ymm14[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm14 = ymm15[2,1,3,3]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3],ymm14[4],ymm1[5,6],ymm14[7]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm6, %ymm8, %ymm6
-; AVX2-FAST-ALL-NEXT:    vmovaps (%rdx), %ymm8
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6],ymm6[7]
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 96(%rdx), %ymm6
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermps %ymm2, %ymm9, %ymm6
-; AVX2-FAST-ALL-NEXT:    vbroadcastsd 24(%rdi), %ymm9
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm3 = ymm9[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm8 = ymm4[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm8[0],ymm3[1],ymm8[2,3],ymm3[4],ymm8[5,6],ymm3[7]
+; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm8 = ymm11[0,0,3,3,4,4,7,7]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm8[2],ymm3[3,4],ymm8[5],ymm3[6,7]
+; AVX2-FAST-ALL-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm13 = [5,6,5,6,5,6,7,7]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm11, %ymm13, %ymm8
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2,3],ymm4[4],ymm8[5,6],ymm4[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 88(%rdi), %ymm8
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm8[2],ymm4[3,4],ymm8[5],ymm4[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm7, %ymm13, %ymm8
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm14 = ymm12[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0],ymm14[1],ymm8[2,3],ymm14[4],ymm8[5,6],ymm14[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 56(%rdi), %ymm14
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm14[2],ymm8[3,4],ymm14[5],ymm8[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm14 = ymm5[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm12 = ymm12[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm14[1],ymm12[2,3],ymm14[4],ymm12[5,6],ymm14[7]
+; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm14 = ymm7[0,0,3,3,4,4,7,7]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm6, %ymm13, %ymm14
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm2 = ymm15[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm14[0],ymm2[1],ymm14[2,3],ymm2[4],ymm14[5,6],ymm2[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 120(%rdi), %ymm14
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm14 = ymm2[0,1],ymm14[2],ymm2[3,4],ymm14[5],ymm2[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm2 = ymm15[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vmovaps 96(%rdi), %ymm15
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm3 = ymm15[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
+; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm3 = ymm6[0,0,3,3,4,4,7,7]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm11, %ymm10, %ymm3
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm9[0],ymm3[1],ymm9[2,3],ymm3[4],ymm9[5,6],ymm3[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 64(%rdx), %ymm9
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm9[2],ymm3[3,4],ymm9[5],ymm3[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm7, %ymm10, %ymm7
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm7[1],ymm5[2,3],ymm7[4],ymm5[5,6],ymm7[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 32(%rdx), %ymm7
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3,4],ymm7[5],ymm5[6,7]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm6, %ymm10, %ymm6
+; AVX2-FAST-ALL-NEXT:    vmovaps (%rdx), %ymm7
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm9 = ymm15[0,0,2,1]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm6 = ymm9[0],ymm6[1],ymm9[2,3],ymm6[4],ymm9[5,6],ymm6[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 96(%rdx), %ymm9
 ; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm9[2],ymm6[3,4],ymm9[5],ymm6[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm9 = ymm8[2,1,3,3]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm9[1],ymm6[2,3],ymm9[4],ymm6[5,6],ymm9[7]
-; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,0,3,3,4,4,7,7]
+; AVX2-FAST-ALL-NEXT:    vpermps %ymm1, %ymm13, %ymm9
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm10 = ymm7[2,1,3,3]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7]
+; AVX2-FAST-ALL-NEXT:    vbroadcastsd 24(%rdi), %ymm10
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
 ; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm2 = ymm8[1,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[1,1,2,2]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0],ymm0[1],ymm7[2,3],ymm0[4],ymm7[5,6],ymm0[7]
+; AVX2-FAST-ALL-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
+; AVX2-FAST-ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 32(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm6, 64(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm4, 288(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm1, 352(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm12, 320(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm10, 96(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm13, 160(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm11, 128(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm7, 224(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm5, 192(%rcx)
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm3, 256(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm9, 64(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm6, 288(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm5, 96(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm3, 192(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm2, 320(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm14, 352(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm12, 128(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm8, 160(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm4, 256(%rcx)
+; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 224(%rcx)
 ; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, (%rcx)
 ; AVX2-FAST-ALL-NEXT:    vzeroupper
@@ -1233,103 +1216,103 @@ define void @store_i32_stride3_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    subq $40, %rsp
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rsi), %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rsi), %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rsi), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdx), %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdx), %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdx), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rsi), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rsi), %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rsi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdx), %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdx), %ymm12
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdx), %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm1 = mem[1,0,2,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm2 = ymm0[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd (%rdx), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm3 = ymm0[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd (%rdx), %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 88(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm9 = ymm10[1,2,3,3,5,6,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1],ymm2[2],ymm9[3,4],ymm2[5],ymm9[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm9 = ymm11[2,1,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm9[1],ymm2[2,3],ymm9[4],ymm2[5,6],ymm9[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm3 = ymm12[2,1,3,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm7 = ymm9[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0],ymm3[1],ymm7[2,3],ymm3[4],ymm7[5,6],ymm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 88(%rdi), %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm9 = mem[1,0,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm12 = ymm7[0,0,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0],ymm9[1],ymm12[2,3],ymm9[4],ymm12[5,6],ymm9[7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 64(%rdx), %ymm12
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm7 = ymm10[2,1,3,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm11 = ymm5[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2,3],ymm7[4],ymm11[5,6],ymm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 56(%rdi), %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm11[2],ymm7[3,4],ymm11[5],ymm7[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 56(%rdi), %ymm12
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm13 = ymm6[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm11 = ymm6[2,1,3,3]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm13 = ymm2[1,2,3,3,5,6,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm13 = ymm8[2,1,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0],ymm13[1],ymm12[2,3],ymm13[4],ymm12[5,6],ymm13[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm11 = ymm13[0],ymm11[1],ymm13[2,3],ymm11[4],ymm13[5,6],ymm11[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 120(%rdi), %ymm13
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm13[2],ymm11[3,4],ymm13[5],ymm11[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm13 = mem[1,0,2,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm13 = ymm13[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm14 = ymm3[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm14 = ymm8[0,0,2,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1],ymm14[2,3],ymm13[4],ymm14[5,6],ymm13[7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 32(%rdx), %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 64(%rdx), %ymm14
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 120(%rdi), %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm15 = ymm4[1,2,3,3,5,6,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2],ymm15[3,4],ymm14[5],ymm15[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm15 = ymm5[2,1,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3],ymm15[4],ymm14[5,6],ymm15[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm14 = mem[1,0,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm14 = ymm14[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm15 = ymm4[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2,3],ymm14[4],ymm15[5,6],ymm14[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 32(%rdx), %ymm15
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm15 = mem[1,0,2,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm15 = ymm15[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm0[0,0,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm2[0,0,2,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3],ymm15[4],ymm1[5,6],ymm15[7]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 96(%rdx), %ymm15
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3,4],ymm15[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rsi), %ymm15
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[1,2,3,3,5,6,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 24(%rdi), %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm9[2],ymm2[3,4],ymm9[5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdx), %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm12 = ymm9[2,1,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5,6],ymm12[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm10 = ymm10[0,0,3,3,4,4,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm10 = ymm11[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm10[0],ymm7[1,2],ymm10[3],ymm7[4,5],ymm10[6],ymm7[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[0,0,3,3,4,4,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm6 = ymm8[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0],ymm3[1,2],ymm6[3],ymm3[4,5],ymm6[6],ymm3[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm4 = ymm4[0,0,3,3,4,4,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm5[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm4 = ymm15[0,0,3,3,4,4,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm3 = ymm15[1,2,3,3,5,6,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdx), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm7 = ymm0[2,1,3,3]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2,3],ymm7[4],ymm3[5,6],ymm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd 24(%rdi), %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm7 = ymm8[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm8 = ymm12[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm8 = ymm9[0,0,3,3,4,4,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm8 = ymm10[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2,3],ymm4[4],ymm8[5,6],ymm4[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm5 = ymm6[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6],ymm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm5 = mem[0,0,3,3,4,4,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm5 = mem[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm5 = ymm9[1,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm4, 32(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 320(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm3, 128(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm15[0,0,3,3,4,4,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 32(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm2, 320(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm4, 128(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, 224(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm2, 64(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm3, 64(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 288(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm14, 352(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm13, 96(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm14, 96(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm13, 192(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm11, 352(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 160(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 192(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 256(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, (%rcx)
@@ -1350,22 +1333,22 @@ define void @store_i32_stride3_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
 ; AVX512-NEXT:    vpermt2d %zmm2, %zmm6, %zmm7
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15]
 ; AVX512-NEXT:    vpermt2d %zmm4, %zmm8, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <u,11,27,u,12,28,u,13,29,u,14,30,u,15,31,u>
-; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm10
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm9, %zmm10
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [26,1,2,27,4,5,28,7,8,29,10,11,30,13,14,31]
-; AVX512-NEXT:    vpermt2d %zmm5, %zmm11, %zmm10
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <5,u,22,6,u,23,7,u,24,8,u,25,9,u,26,10>
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <10,27,u,11,28,u,12,29,u,13,30,u,14,31,u,15>
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm10
+; AVX512-NEXT:    vpermt2d %zmm1, %zmm9, %zmm10
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,1,27,3,4,28,6,7,29,9,10,30,12,13,31,15]
+; AVX512-NEXT:    vpermt2d %zmm3, %zmm11, %zmm10
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <5,21,u,6,22,u,7,23,u,8,24,u,9,25,u,10>
 ; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm13
-; AVX512-NEXT:    vpermt2d %zmm1, %zmm12, %zmm13
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,21,2,3,22,5,6,23,8,9,24,11,12,25,14,15]
-; AVX512-NEXT:    vpermt2d %zmm5, %zmm14, %zmm13
+; AVX512-NEXT:    vpermt2d %zmm5, %zmm12, %zmm13
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,1,22,3,4,23,6,7,24,9,10,25,12,13,26,15]
+; AVX512-NEXT:    vpermt2d %zmm1, %zmm14, %zmm13
 ; AVX512-NEXT:    vpermt2d %zmm3, %zmm6, %zmm1
 ; AVX512-NEXT:    vpermt2d %zmm5, %zmm8, %zmm1
-; AVX512-NEXT:    vpermi2d %zmm2, %zmm0, %zmm9
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm11, %zmm9
-; AVX512-NEXT:    vpermt2d %zmm0, %zmm12, %zmm2
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm14, %zmm2
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm4, %zmm9
+; AVX512-NEXT:    vpermt2d %zmm2, %zmm11, %zmm9
+; AVX512-NEXT:    vpermt2d %zmm4, %zmm12, %zmm2
+; AVX512-NEXT:    vpermt2d %zmm0, %zmm14, %zmm2
 ; AVX512-NEXT:    vmovdqu64 %zmm2, 64(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm9, 128(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm1, 192(%rcx)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
index 3c78fee556dc5..019068553ca47 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
@@ -13,14 +13,14 @@ define void @store_i32_stride4_vf2(<2 x i32>* %in.vecptr0, <2 x i32>* %in.vecptr
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
-; SSE-NEXT:    movaps %xmm1, 16(%r8)
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE-NEXT:    movaps %xmm0, 16(%r8)
 ; SSE-NEXT:    movaps %xmm2, (%r8)
 ; SSE-NEXT:    retq
 ;
@@ -469,176 +469,176 @@ define void @store_i32_stride4_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vec
 ; AVX1-LABEL: store_i32_stride4_vf16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    subq $24, %rsp
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm7
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm6
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm2
+; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm13
 ; AVX1-NEXT:    vmovaps 48(%rdi), %xmm11
-; AVX1-NEXT:    vmovaps 16(%rsi), %xmm13
-; AVX1-NEXT:    vmovaps 32(%rsi), %xmm14
-; AVX1-NEXT:    vmovaps 48(%rsi), %xmm10
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm11[1],xmm10[1],zero,zero
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm8
-; AVX1-NEXT:    vmovaps 16(%rcx), %xmm12
+; AVX1-NEXT:    vmovaps 16(%rsi), %xmm1
+; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovaps 32(%rsi), %xmm8
+; AVX1-NEXT:    vmovaps 48(%rsi), %xmm9
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm10
+; AVX1-NEXT:    vmovaps 16(%rcx), %xmm0
+; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovaps 32(%rcx), %xmm3
-; AVX1-NEXT:    vmovaps 48(%rcx), %xmm2
+; AVX1-NEXT:    vmovaps 48(%rcx), %xmm6
 ; AVX1-NEXT:    vmovaps 16(%rdx), %xmm15
-; AVX1-NEXT:    vmovaps 32(%rdx), %xmm1
-; AVX1-NEXT:    vmovaps 48(%rdx), %xmm4
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm9 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm2[0],xmm4[0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,1,2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm5, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm5[2,3],ymm8[4,5],ymm5[6,7]
+; AVX1-NEXT:    vmovaps 32(%rdx), %xmm4
+; AVX1-NEXT:    vmovaps 48(%rdx), %xmm5
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm15[0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,3],ymm10[4,5],ymm0[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm5 = xmm6[1],xmm14[1],zero,zero
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm8 = xmm6[0],xmm14[0],xmm6[1],xmm14[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm8, %ymm8
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm3[0],xmm1[0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm5[2,3],ymm8[4,5],ymm5[6,7]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm13[1],xmm8[1],zero,zero
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm3[0],xmm4[0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,1,2,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm7, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm5 = xmm7[1],xmm13[1],zero,zero
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm13[0],xmm7[1],xmm13[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm12[0],xmm15[0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm15[0],xmm12[0],xmm15[1],xmm12[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3],ymm5[4,5],ymm6[6,7]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm11[1],xmm9[1],zero,zero
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm6[0],xmm5[0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm1, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps (%rdi), %xmm8
-; AVX1-NEXT:    vmovaps (%rsi), %xmm7
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm6 = xmm8[1],xmm7[1],zero,zero
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm9, %ymm9
-; AVX1-NEXT:    vmovaps (%rcx), %xmm6
+; AVX1-NEXT:    vmovaps (%rdi), %xmm2
+; AVX1-NEXT:    vmovaps (%rsi), %xmm1
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm7 = xmm2[1],xmm1[1],zero,zero
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm10 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm10, %ymm10
+; AVX1-NEXT:    vmovaps (%rcx), %xmm7
 ; AVX1-NEXT:    vmovaps (%rdx), %xmm0
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm6[0],xmm0[0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,1,2,0]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm13 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm13, %ymm5, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm5[2,3],ymm9[4,5],ymm5[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[2],xmm6[2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm7[3,0],xmm8[3,0]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm0[0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm12 = xmm12[0,1,2,0]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm14 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm14, %ymm12, %ymm12
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3],ymm10[4,5],ymm12[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm12 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm3 = zero,zero,xmm4[2],xmm3[2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm12, %ymm3, %ymm12
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm8[3,0],xmm13[3,0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm12[2,3],ymm3[4,5],ymm12[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm5 = zero,zero,xmm5[2],xmm6[2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm9[3,0],xmm11[3,0]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[2,0,2,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm5[0,1],ymm0[2,3],ymm5[4,5],ymm0[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm4[2],xmm2[2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm11[2],xmm10[2],xmm11[3],xmm10[3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm10[3,0],xmm11[3,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm1[2],xmm3[2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm14[2],xmm3[3],xmm14[3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm14[3,0],xmm3[3,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm15[2],xmm12[2],xmm15[3],xmm12[3]
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm3 = zero,zero,xmm15[2],xmm12[2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[2],xmm7[2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[3,0],xmm2[3,0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,0,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm5, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm15[2],xmm2[2],xmm15[3],xmm2[3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm15[2],xmm2[2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[3,0],xmm5[3,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; AVX1-NEXT:    vmovaps %ymm2, 96(%r8)
-; AVX1-NEXT:    vmovaps %ymm1, 160(%r8)
-; AVX1-NEXT:    vmovaps %ymm0, 224(%r8)
-; AVX1-NEXT:    vmovaps %ymm8, 32(%r8)
-; AVX1-NEXT:    vmovaps %ymm9, (%r8)
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[3,0],xmm6[3,0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[2,0,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
+; AVX1-NEXT:    vmovaps %ymm1, 96(%r8)
+; AVX1-NEXT:    vmovaps %ymm0, 32(%r8)
+; AVX1-NEXT:    vmovaps %ymm4, 224(%r8)
+; AVX1-NEXT:    vmovaps %ymm3, 160(%r8)
+; AVX1-NEXT:    vmovaps %ymm10, (%r8)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm0, 64(%r8)
+; AVX1-NEXT:    vmovaps %ymm0, 192(%r8)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm0, 128(%r8)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm0, 192(%r8)
+; AVX1-NEXT:    vmovaps %ymm0, 64(%r8)
 ; AVX1-NEXT:    addq $24, %rsp
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: store_i32_stride4_vf16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovaps (%rdi), %ymm9
-; AVX2-NEXT:    vmovaps (%rcx), %xmm10
-; AVX2-NEXT:    vmovaps 32(%rcx), %xmm3
-; AVX2-NEXT:    vmovaps (%rdx), %xmm5
-; AVX2-NEXT:    vmovaps 32(%rdx), %xmm6
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm10[2],xmm5[3],xmm10[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX2-NEXT:    vmovaps (%rsi), %xmm7
-; AVX2-NEXT:    vmovaps 32(%rsi), %xmm2
-; AVX2-NEXT:    vmovaps (%rdi), %xmm0
-; AVX2-NEXT:    vmovaps 32(%rdi), %xmm4
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm8 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,1,1,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm1[2,3],ymm8[4,5],ymm1[6,7]
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm8 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
+; AVX2-NEXT:    vmovaps 32(%rdi), %ymm13
+; AVX2-NEXT:    vmovaps (%rdi), %ymm11
+; AVX2-NEXT:    vmovaps 32(%rsi), %ymm14
+; AVX2-NEXT:    vmovaps (%rsi), %ymm12
+; AVX2-NEXT:    vmovaps 32(%rdx), %ymm5
+; AVX2-NEXT:    vmovaps (%rdx), %ymm15
+; AVX2-NEXT:    vmovaps 32(%rcx), %ymm7
+; AVX2-NEXT:    vmovaps (%rcx), %xmm6
+; AVX2-NEXT:    vmovaps 32(%rcx), %xmm0
+; AVX2-NEXT:    vmovaps (%rdx), %xmm1
+; AVX2-NEXT:    vmovaps 32(%rdx), %xmm2
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm8 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm11 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[0,1,1,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm8[2,3],ymm11[4,5],ymm8[6,7]
-; AVX2-NEXT:    vmovaps 32(%rdi), %ymm11
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX2-NEXT:    vmovaps (%rsi), %ymm6
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX2-NEXT:    vmovaps 32(%rsi), %ymm4
+; AVX2-NEXT:    vmovaps 32(%rsi), %xmm3
+; AVX2-NEXT:    vmovaps 32(%rdi), %xmm4
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm9 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm10 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
+; AVX2-NEXT:    vmovaps (%rsi), %xmm8
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX2-NEXT:    vmovaps (%rdi), %xmm2
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,1,1,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5],ymm0[6,7]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,1,1,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
+; AVX2-NEXT:    vmovaps (%rcx), %ymm3
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,1,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
-; AVX2-NEXT:    vmovaps 32(%rdx), %ymm2
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1]
-; AVX2-NEXT:    vmovaps 32(%rcx), %ymm10
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5],ymm5[6,7]
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm5 = ymm2[0],ymm10[0],ymm2[1],ymm10[1],ymm2[4],ymm10[4],ymm2[5],ymm10[5]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm7 = ymm11[0],ymm4[0],ymm11[1],ymm4[1],ymm11[4],ymm4[4],ymm11[5],ymm4[5]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[2,1,3,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
-; AVX2-NEXT:    vmovaps (%rdx), %ymm7
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm10[2],ymm2[3],ymm10[3],ymm2[6],ymm10[6],ymm2[7],ymm10[7]
-; AVX2-NEXT:    vmovaps (%rcx), %ymm10
+; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm2 = ymm15[0],ymm3[0],ymm15[1],ymm3[1],ymm15[4],ymm3[4],ymm15[5],ymm3[5]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm4 = ymm11[2],ymm4[2],ymm11[3],ymm4[3],ymm11[6],ymm4[6],ymm11[7],ymm4[7]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm4 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[4],ymm12[4],ymm11[5],ymm12[5]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm4 = ymm7[0],ymm10[0],ymm7[1],ymm10[1],ymm7[4],ymm10[4],ymm7[5],ymm10[5]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm4 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm11 = ymm9[0],ymm6[0],ymm9[1],ymm6[1],ymm9[4],ymm6[4],ymm9[5],ymm6[5]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[2,1,3,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm11[0,1],ymm4[2,3],ymm11[4,5],ymm4[6,7]
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm7 = ymm7[2],ymm10[2],ymm7[3],ymm10[3],ymm7[6],ymm10[6],ymm7[7],ymm10[7]
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm6 = ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[6],ymm6[6],ymm9[7],ymm6[7]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,2,2,3]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm6 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5],ymm7[6,7]
-; AVX2-NEXT:    vmovaps %ymm6, 96(%r8)
-; AVX2-NEXT:    vmovaps %ymm4, 64(%r8)
-; AVX2-NEXT:    vmovaps %ymm2, 224(%r8)
+; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm5 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm6 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm3 = ymm15[2],ymm3[2],ymm15[3],ymm3[3],ymm15[6],ymm3[6],ymm15[7],ymm3[7]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm6 = ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[6],ymm12[6],ymm11[7],ymm12[7]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3],ymm6[4,5],ymm3[6,7]
+; AVX2-NEXT:    vmovaps %ymm3, 96(%r8)
 ; AVX2-NEXT:    vmovaps %ymm5, 192(%r8)
-; AVX2-NEXT:    vmovaps %ymm0, (%r8)
-; AVX2-NEXT:    vmovaps %ymm3, 160(%r8)
-; AVX2-NEXT:    vmovaps %ymm8, 128(%r8)
-; AVX2-NEXT:    vmovaps %ymm1, 32(%r8)
+; AVX2-NEXT:    vmovaps %ymm4, 224(%r8)
+; AVX2-NEXT:    vmovaps %ymm2, 64(%r8)
+; AVX2-NEXT:    vmovaps %ymm1, (%r8)
+; AVX2-NEXT:    vmovaps %ymm9, 32(%r8)
+; AVX2-NEXT:    vmovaps %ymm0, 128(%r8)
+; AVX2-NEXT:    vmovaps %ymm10, 160(%r8)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -1152,49 +1152,49 @@ define void @store_i32_stride4_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
 ; AVX2-NEXT:    vmovaps (%rdx), %xmm14
 ; AVX2-NEXT:    vmovaps 32(%rdx), %xmm12
 ; AVX2-NEXT:    vmovaps 64(%rdx), %xmm3
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm8 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1]
 ; AVX2-NEXT:    vmovaps 32(%rsi), %xmm4
 ; AVX2-NEXT:    vmovaps 64(%rsi), %xmm7
 ; AVX2-NEXT:    vmovaps 32(%rdi), %xmm0
 ; AVX2-NEXT:    vmovaps 64(%rdi), %xmm5
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm9 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm9 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
 ; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7]
 ; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm5 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7]
 ; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7]
 ; AVX2-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
 ; AVX2-NEXT:    vmovaps 96(%rcx), %xmm10
 ; AVX2-NEXT:    vmovaps 96(%rdx), %xmm3
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm8 = ymm4[0,0,2,1]
 ; AVX2-NEXT:    vmovaps 96(%rsi), %xmm4
 ; AVX2-NEXT:    vmovaps 96(%rdi), %xmm0
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm12 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm12 = ymm12[0,1,1,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm8[2,3],ymm12[4,5],ymm8[6,7]
 ; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-NEXT:    vmovaps (%rsi), %xmm1
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
 ; AVX2-NEXT:    vmovaps (%rdi), %xmm10
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX2-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7]
 ; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -1219,43 +1219,43 @@ define void @store_i32_stride4_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
 ; AVX2-NEXT:    vunpcklps {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm4[0,1],ymm1[2,3],ymm4[4,5],ymm1[6,7]
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm1 = ymm6[0],ymm0[0],ymm6[1],ymm0[1],ymm6[4],ymm0[4],ymm6[5],ymm0[5]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm0[2],ymm6[3],ymm0[3],ymm6[6],ymm0[6],ymm6[7],ymm0[7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
 ; AVX2-NEXT:    vmovaps 64(%rdi), %ymm10
 ; AVX2-NEXT:    vmovaps 64(%rsi), %ymm14
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm9 = ymm10[0],ymm14[0],ymm10[1],ymm14[1],ymm10[4],ymm14[4],ymm10[5],ymm14[5]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm9 = ymm10[2],ymm14[2],ymm10[3],ymm14[3],ymm10[6],ymm14[6],ymm10[7],ymm14[7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[2,1,3,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm1[2,3],ymm9[4,5],ymm1[6,7]
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm6[2],ymm0[2],ymm6[3],ymm0[3],ymm6[6],ymm0[6],ymm6[7],ymm0[7]
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm6 = ymm10[2],ymm14[2],ymm10[3],ymm14[3],ymm10[6],ymm14[6],ymm10[7],ymm14[7]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[1],ymm0[1],ymm6[4],ymm0[4],ymm6[5],ymm0[5]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm6 = ymm10[0],ymm14[0],ymm10[1],ymm14[1],ymm10[4],ymm14[4],ymm10[5],ymm14[5]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm0[2,3],ymm6[4,5],ymm0[6,7]
 ; AVX2-NEXT:    vmovaps 32(%rdx), %ymm6
 ; AVX2-NEXT:    vmovaps 32(%rcx), %ymm9
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm10 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm10 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,2,2,3]
 ; AVX2-NEXT:    vmovaps 32(%rdi), %ymm14
 ; AVX2-NEXT:    vmovaps 32(%rsi), %ymm0
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm11 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm11 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[2,1,3,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5],ymm10[6,7]
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm6 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[0,2,2,3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1],ymm6[2,3],ymm0[4,5],ymm6[6,7]
 ; AVX2-NEXT:    vmovaps 96(%rdx), %ymm6
 ; AVX2-NEXT:    vmovaps 96(%rcx), %ymm9
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm11 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm11 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[0,2,2,3]
 ; AVX2-NEXT:    vmovaps 96(%rdi), %ymm14
 ; AVX2-NEXT:    vmovaps 96(%rsi), %ymm0
-; AVX2-NEXT:    vunpcklps {{.*#+}} ymm8 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
+; AVX2-NEXT:    vunpckhps {{.*#+}} ymm8 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[2,1,3,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm11[2,3],ymm8[4,5],ymm11[6,7]
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm6 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
+; AVX2-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[0,2,2,3]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5],ymm6[6,7]
@@ -1265,28 +1265,28 @@ define void @store_i32_stride4_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[2,1,3,3]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7]
 ; AVX2-NEXT:    vmovaps %ymm6, 96(%r8)
-; AVX2-NEXT:    vmovaps %ymm0, 480(%r8)
-; AVX2-NEXT:    vmovaps %ymm8, 448(%r8)
-; AVX2-NEXT:    vmovaps %ymm1, 224(%r8)
-; AVX2-NEXT:    vmovaps %ymm10, 192(%r8)
-; AVX2-NEXT:    vmovaps %ymm4, 352(%r8)
-; AVX2-NEXT:    vmovaps %ymm7, 320(%r8)
+; AVX2-NEXT:    vmovaps %ymm0, 448(%r8)
+; AVX2-NEXT:    vmovaps %ymm8, 480(%r8)
+; AVX2-NEXT:    vmovaps %ymm1, 192(%r8)
+; AVX2-NEXT:    vmovaps %ymm10, 224(%r8)
+; AVX2-NEXT:    vmovaps %ymm4, 320(%r8)
+; AVX2-NEXT:    vmovaps %ymm7, 352(%r8)
 ; AVX2-NEXT:    vmovaps %ymm12, 64(%r8)
 ; AVX2-NEXT:    vmovaps %ymm15, (%r8)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-NEXT:    vmovaps %ymm0, 32(%r8)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT:    vmovaps %ymm0, 416(%r8)
-; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-NEXT:    vmovaps %ymm0, 384(%r8)
-; AVX2-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT:    vmovaps %ymm0, 160(%r8)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 416(%r8)
+; AVX2-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
 ; AVX2-NEXT:    vmovaps %ymm0, 128(%r8)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT:    vmovaps %ymm0, 288(%r8)
+; AVX2-NEXT:    vmovaps %ymm0, 160(%r8)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-NEXT:    vmovaps %ymm0, 256(%r8)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 288(%r8)
 ; AVX2-NEXT:    addq $168, %rsp
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
index de4c18d949020..36c95807e44d5 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
@@ -14,22 +14,19 @@ define void @store_i32_stride6_vf2(<2 x i32>* %in.vecptr0, <2 x i32>* %in.vecptr
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
 ; SSE-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
-; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
-; SSE-NEXT:    movsd {{.*#+}} xmm5 = mem[0],zero
-; SSE-NEXT:    movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
-; SSE-NEXT:    movaps %xmm0, %xmm5
-; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm1[0]
-; SSE-NEXT:    movaps %xmm2, %xmm6
-; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm3[0]
-; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,3],xmm4[1,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[1,3]
-; SSE-NEXT:    movaps %xmm6, 32(%rax)
-; SSE-NEXT:    movaps %xmm4, 16(%rax)
+; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm0[1,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
+; SSE-NEXT:    movaps %xmm1, 32(%rax)
+; SSE-NEXT:    movaps %xmm3, 16(%rax)
 ; SSE-NEXT:    movaps %xmm0, (%rax)
 ; SSE-NEXT:    retq
 ;
@@ -175,77 +172,75 @@ define void @store_i32_stride6_vf4(<4 x i32>* %in.vecptr0, <4 x i32>* %in.vecptr
 ; SSE-LABEL: store_i32_stride6_vf4:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    movaps (%rdi), %xmm0
-; SSE-NEXT:    movaps (%rsi), %xmm8
-; SSE-NEXT:    movaps (%rdx), %xmm1
-; SSE-NEXT:    movaps (%rcx), %xmm9
-; SSE-NEXT:    movaps (%r8), %xmm2
-; SSE-NEXT:    movaps (%r9), %xmm6
-; SSE-NEXT:    movaps %xmm1, %xmm3
-; SSE-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1]
-; SSE-NEXT:    movaps %xmm2, %xmm4
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm6[1,1]
-; SSE-NEXT:    movaps %xmm2, %xmm5
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm6[1]
-; SSE-NEXT:    movaps %xmm2, %xmm7
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,3],xmm6[3,3]
-; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0],xmm2[2,3]
-; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,3],xmm4[0,2]
-; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,2],xmm0[2,3]
-; SSE-NEXT:    unpckhps {{.*#+}} xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
-; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,3],xmm7[0,2]
-; SSE-NEXT:    movaps %xmm6, 16(%rax)
-; SSE-NEXT:    movaps %xmm0, 48(%rax)
+; SSE-NEXT:    movapd (%rdi), %xmm0
+; SSE-NEXT:    movapd (%rsi), %xmm9
+; SSE-NEXT:    movapd (%rdx), %xmm2
+; SSE-NEXT:    movapd (%rcx), %xmm8
+; SSE-NEXT:    movapd (%r8), %xmm1
+; SSE-NEXT:    movapd (%r9), %xmm5
+; SSE-NEXT:    movapd %xmm2, %xmm6
+; SSE-NEXT:    unpcklps {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
+; SSE-NEXT:    movapd %xmm0, %xmm7
+; SSE-NEXT:    unpcklps {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
+; SSE-NEXT:    movapd %xmm7, %xmm3
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm6[0]
+; SSE-NEXT:    movapd %xmm1, %xmm4
+; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm4[1]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm9[2],xmm0[3],xmm9[3]
+; SSE-NEXT:    movapd %xmm0, %xmm5
+; SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE-NEXT:    shufpd {{.*#+}} xmm4 = xmm4[0],xmm7[1]
+; SSE-NEXT:    movapd %xmm4, 16(%rax)
+; SSE-NEXT:    movapd %xmm0, 48(%rax)
 ; SSE-NEXT:    movaps %xmm1, 80(%rax)
-; SSE-NEXT:    movaps %xmm5, 64(%rax)
-; SSE-NEXT:    movaps %xmm3, 32(%rax)
-; SSE-NEXT:    movaps %xmm2, (%rax)
+; SSE-NEXT:    movapd %xmm5, 64(%rax)
+; SSE-NEXT:    movapd %xmm6, 32(%rax)
+; SSE-NEXT:    movapd %xmm3, (%rax)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: store_i32_stride6_vf4:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX1-NEXT:    vmovaps (%rdi), %xmm0
-; AVX1-NEXT:    vmovaps (%rsi), %xmm2
-; AVX1-NEXT:    vmovaps (%rdx), %xmm1
+; AVX1-NEXT:    vmovaps (%rsi), %xmm1
+; AVX1-NEXT:    vmovaps (%rdx), %xmm2
 ; AVX1-NEXT:    vmovaps (%rcx), %xmm3
 ; AVX1-NEXT:    vmovaps (%r8), %xmm4
 ; AVX1-NEXT:    vmovaps (%r9), %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm13
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm8
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm9
-; AVX1-NEXT:    vunpcklps {{.*#+}} ymm10 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[4],ymm8[4],ymm9[5],ymm8[5]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm11
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm6[0],ymm11[0],ymm6[2],ymm11[2]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[0,2,3,1,4,6,7,5]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm7 = xmm3[0,0],xmm1[0,0]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm12[0,1],ymm7[2,3],ymm12[4,5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm10[4,5],ymm7[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} ymm6 = ymm11[2],ymm6[2],ymm11[3],ymm6[3],ymm11[6],ymm6[6],ymm11[7],ymm6[7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm10
-; AVX1-NEXT:    vshufps {{.*#+}} ymm10 = ymm13[1,2],ymm10[1,2],ymm13[5,6],ymm10[5,6]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm10 = ymm10[0,2,3,1,4,6,7,5]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5],ymm10[6,7]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5,6,7]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm8[1],ymm9[1],ymm8[3],ymm9[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm12
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm7
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm8 = ymm7[0],ymm12[0],ymm7[1],ymm12[1],ymm7[4],ymm12[4],ymm7[5],ymm12[5]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm10
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm11 = ymm11[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm6 = xmm3[0,0],xmm2[0,0]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm11[0,1],ymm6[2,3],ymm11[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5],ymm6[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm8 = ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[6],ymm10[6],ymm9[7],ymm10[7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm9
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[3,3],ymm2[3,3],ymm0[7,7],ymm2[7,7]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3,4,5],ymm5[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm2[1,2],ymm9[1,2],ymm2[5,6],ymm9[5,6]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5],ymm3[6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm12[1],ymm7[1],ymm12[3],ymm7[3]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm4 = ymm4[0,2,3,1,4,6,7,5]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[3,0],ymm9[3,0],ymm2[7,4],ymm9[7,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-NEXT:    vmovaps %ymm0, 64(%rax)
-; AVX1-NEXT:    vmovaps %ymm4, 32(%rax)
-; AVX1-NEXT:    vmovaps %ymm7, (%rax)
+; AVX1-NEXT:    vmovaps %ymm3, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm6, (%rax)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -255,40 +250,39 @@ define void @store_i32_stride6_vf4(<4 x i32>* %in.vecptr0, <4 x i32>* %in.vecptr
 ; AVX2-NEXT:    vmovaps (%rdi), %xmm0
 ; AVX2-NEXT:    vmovaps (%rsi), %xmm1
 ; AVX2-NEXT:    vmovaps (%rdx), %xmm2
-; AVX2-NEXT:    vmovaps (%rcx), %xmm3
-; AVX2-NEXT:    vmovaps (%r8), %xmm4
-; AVX2-NEXT:    vmovaps (%r9), %xmm5
+; AVX2-NEXT:    vmovaps (%r8), %xmm3
+; AVX2-NEXT:    vmovaps (%r9), %xmm4
+; AVX2-NEXT:    vmovaps {{.*#+}} xmm5 = <u,u,0,4>
+; AVX2-NEXT:    vinsertf128 $1, (%rcx), %ymm2, %ymm2
+; AVX2-NEXT:    vpermps %ymm2, %ymm5, %ymm5
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm6
-; AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm7
-; AVX2-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm8
-; AVX2-NEXT:    vmovaps {{.*#+}} xmm9 = <u,u,0,4>
-; AVX2-NEXT:    vpermps %ymm7, %ymm9, %ymm9
-; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [0,4,1,5,0,4,1,5]
-; AVX2-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX2-NEXT:    vpermps %ymm6, %ymm10, %ymm10
-; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5,6,7]
-; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [17179869184,17179869184,17179869184,17179869184]
-; AVX2-NEXT:    vpermps %ymm8, %ymm10, %ymm10
-; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5],ymm9[6,7]
-; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [25769803778,25769803778,25769803778,25769803778]
-; AVX2-NEXT:    vpermps %ymm6, %ymm10, %ymm6
-; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [1,5,2,6,1,5,2,6]
-; AVX2-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX2-NEXT:    vpermps %ymm7, %ymm10, %ymm7
-; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
-; AVX2-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5,6,7]
-; AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
+; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = [0,4,1,5,0,4,1,5]
+; AVX2-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX2-NEXT:    vpermps %ymm6, %ymm7, %ymm7
+; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5,6,7]
+; AVX2-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm7
+; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm8 = [17179869184,17179869184,17179869184,17179869184]
+; AVX2-NEXT:    vpermps %ymm7, %ymm8, %ymm8
+; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5],ymm5[6,7]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = [1,5,2,6,1,5,2,6]
+; AVX2-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX2-NEXT:    vpermps %ymm2, %ymm4, %ymm4
+; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm4 = [25769803778,25769803778,25769803778,25769803778]
+; AVX2-NEXT:    vpermps %ymm6, %ymm4, %ymm4
+; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [2,6,3,7,2,6,3,7]
 ; AVX2-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX2-NEXT:    vpermps %ymm8, %ymm1, %ymm1
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX2-NEXT:    vpermps %ymm7, %ymm1, %ymm1
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [30064771075,30064771075,30064771075,30064771075]
+; AVX2-NEXT:    vpermps %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX2-NEXT:    vmovaps %ymm0, 64(%rax)
-; AVX2-NEXT:    vmovaps %ymm4, 32(%rax)
-; AVX2-NEXT:    vmovaps %ymm9, (%rax)
+; AVX2-NEXT:    vmovaps %ymm3, 32(%rax)
+; AVX2-NEXT:    vmovaps %ymm5, (%rax)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -302,8 +296,8 @@ define void @store_i32_stride6_vf4(<4 x i32>* %in.vecptr0, <4 x i32>* %in.vecptr
 ; AVX512-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vinserti32x4 $1, (%r9), %zmm2, %zmm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = [18,22,3,7,11,15,19,23]
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,6,19,23,27,31,3,7]
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm1, %zmm2
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,4,8,12,16,20,1,5,9,13,17,21,2,6,10,14]
 ; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm3
 ; AVX512-NEXT:    vmovdqu64 %zmm3, (%rax)
@@ -334,393 +328,206 @@ define void @store_i32_stride6_vf8(<8 x i32>* %in.vecptr0, <8 x i32>* %in.vecptr
 ; SSE-LABEL: store_i32_stride6_vf8:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    movaps (%rdi), %xmm2
-; SSE-NEXT:    movaps 16(%rdi), %xmm8
-; SSE-NEXT:    movaps (%rsi), %xmm0
-; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 16(%rsi), %xmm13
-; SSE-NEXT:    movaps (%rdx), %xmm4
-; SSE-NEXT:    movaps 16(%rdx), %xmm1
-; SSE-NEXT:    movaps (%rcx), %xmm11
-; SSE-NEXT:    movaps 16(%rcx), %xmm15
-; SSE-NEXT:    movaps (%r8), %xmm10
-; SSE-NEXT:    movaps 16(%r8), %xmm0
-; SSE-NEXT:    movaps (%r9), %xmm7
-; SSE-NEXT:    movaps 16(%r9), %xmm5
-; SSE-NEXT:    movaps %xmm0, %xmm6
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,3],xmm5[3,3]
-; SSE-NEXT:    movaps %xmm1, %xmm9
-; SSE-NEXT:    unpckhps {{.*#+}} xmm9 = xmm9[2],xmm15[2],xmm9[3],xmm15[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[2,3],xmm6[0,2]
-; SSE-NEXT:    movaps %xmm8, %xmm14
-; SSE-NEXT:    unpckhps {{.*#+}} xmm14 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; SSE-NEXT:    movaps %xmm0, %xmm12
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm5[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[0,2],xmm14[2,3]
-; SSE-NEXT:    movaps %xmm15, %xmm3
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,1],xmm3[2,0]
-; SSE-NEXT:    movaps %xmm0, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm5[1,1]
-; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1]
-; SSE-NEXT:    unpcklps {{.*#+}} xmm8 = xmm8[0],xmm13[0],xmm8[1],xmm13[1]
-; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm8[2,3]
-; SSE-NEXT:    movlhps {{.*#+}} xmm8 = xmm8[0],xmm1[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,3],xmm3[0,2]
-; SSE-NEXT:    movaps %xmm10, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm7[3,3]
-; SSE-NEXT:    movaps %xmm4, %xmm13
-; SSE-NEXT:    unpckhps {{.*#+}} xmm13 = xmm13[2],xmm11[2],xmm13[3],xmm11[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[2,3],xmm0[0,2]
-; SSE-NEXT:    movaps %xmm2, %xmm0
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE-NEXT:    movaps %xmm10, %xmm3
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm7[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm0[2,3]
-; SSE-NEXT:    movaps %xmm11, %xmm6
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm4[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,0]
-; SSE-NEXT:    movaps %xmm10, %xmm6
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,1],xmm7[1,1]
-; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1]
-; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1]
-; SSE-NEXT:    movlhps {{.*#+}} xmm7 = xmm7[0],xmm10[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm2[2,3]
-; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,3],xmm6[0,2]
-; SSE-NEXT:    movaps %xmm2, (%rax)
+; SSE-NEXT:    movaps (%rdi), %xmm1
+; SSE-NEXT:    movaps 16(%rdi), %xmm6
+; SSE-NEXT:    movaps (%rsi), %xmm8
+; SSE-NEXT:    movaps 16(%rsi), %xmm12
+; SSE-NEXT:    movaps (%rdx), %xmm14
+; SSE-NEXT:    movaps 16(%rdx), %xmm5
+; SSE-NEXT:    movaps (%rcx), %xmm9
+; SSE-NEXT:    movaps 16(%rcx), %xmm13
+; SSE-NEXT:    movaps (%r8), %xmm7
+; SSE-NEXT:    movaps 16(%r8), %xmm3
+; SSE-NEXT:    movaps (%r9), %xmm11
+; SSE-NEXT:    movaps 16(%r9), %xmm15
+; SSE-NEXT:    movaps %xmm3, %xmm2
+; SSE-NEXT:    unpckhps {{.*#+}} xmm2 = xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; SSE-NEXT:    movaps %xmm5, %xmm4
+; SSE-NEXT:    unpckhps {{.*#+}} xmm4 = xmm4[2],xmm13[2],xmm4[3],xmm13[3]
+; SSE-NEXT:    movaps %xmm4, %xmm10
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm2[1]
+; SSE-NEXT:    movaps %xmm6, %xmm0
+; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3]
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1]
+; SSE-NEXT:    movaps %xmm5, %xmm13
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm3[1]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3]
+; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm5[0]
+; SSE-NEXT:    movaps %xmm7, %xmm15
+; SSE-NEXT:    unpckhps {{.*#+}} xmm15 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
+; SSE-NEXT:    movaps %xmm14, %xmm5
+; SSE-NEXT:    unpckhps {{.*#+}} xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
+; SSE-NEXT:    movaps %xmm5, %xmm12
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm15[1]
+; SSE-NEXT:    movaps %xmm1, %xmm4
+; SSE-NEXT:    unpckhps {{.*#+}} xmm4 = xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[0,1],xmm4[2,3]
+; SSE-NEXT:    movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm14 = xmm14[0],xmm9[0],xmm14[1],xmm9[1]
+; SSE-NEXT:    movaps %xmm14, %xmm5
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm7[1]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,3]
+; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm14[0]
+; SSE-NEXT:    movaps %xmm1, (%rax)
 ; SSE-NEXT:    movaps %xmm7, 16(%rax)
-; SSE-NEXT:    movaps %xmm4, 32(%rax)
-; SSE-NEXT:    movaps %xmm0, 48(%rax)
-; SSE-NEXT:    movaps %xmm3, 64(%rax)
-; SSE-NEXT:    movaps %xmm13, 80(%rax)
-; SSE-NEXT:    movaps %xmm8, 96(%rax)
-; SSE-NEXT:    movaps %xmm5, 112(%rax)
-; SSE-NEXT:    movaps %xmm1, 128(%rax)
-; SSE-NEXT:    movaps %xmm14, 144(%rax)
-; SSE-NEXT:    movaps %xmm12, 160(%rax)
-; SSE-NEXT:    movaps %xmm9, 176(%rax)
+; SSE-NEXT:    movaps %xmm5, 32(%rax)
+; SSE-NEXT:    movaps %xmm4, 48(%rax)
+; SSE-NEXT:    movaps %xmm15, 64(%rax)
+; SSE-NEXT:    movaps %xmm12, 80(%rax)
+; SSE-NEXT:    movaps %xmm6, 96(%rax)
+; SSE-NEXT:    movaps %xmm3, 112(%rax)
+; SSE-NEXT:    movaps %xmm13, 128(%rax)
+; SSE-NEXT:    movaps %xmm0, 144(%rax)
+; SSE-NEXT:    movaps %xmm2, 160(%rax)
+; SSE-NEXT:    movaps %xmm10, 176(%rax)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: store_i32_stride6_vf8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovaps (%rdi), %ymm8
+; AVX1-NEXT:    vmovaps (%rdi), %ymm9
 ; AVX1-NEXT:    vmovaps (%rsi), %ymm11
-; AVX1-NEXT:    vmovaps (%rdx), %ymm5
-; AVX1-NEXT:    vmovaps (%rcx), %ymm6
+; AVX1-NEXT:    vmovaps (%rdx), %ymm12
+; AVX1-NEXT:    vmovaps (%rcx), %ymm1
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm4 = ymm9[0],ymm11[0],ymm9[1],ymm11[1],ymm9[4],ymm11[4],ymm9[5],ymm11[5]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm1[0],ymm12[0],ymm1[2],ymm12[2]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm5
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vmovaps (%rcx), %xmm6
+; AVX1-NEXT:    vmovaps (%rdx), %xmm7
+; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm7[1,2],xmm6[1,2]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[0,2,1,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm5, %ymm5
+; AVX1-NEXT:    vmovaps (%r9), %xmm4
+; AVX1-NEXT:    vmovaps (%r8), %xmm2
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm10 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
+; AVX1-NEXT:    vmovaps (%rsi), %xmm3
+; AVX1-NEXT:    vmovaps (%rdi), %xmm0
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm13 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm14
+; AVX1-NEXT:    vblendps {{.*#+}} ymm14 = ymm5[0,1,2,3],ymm14[4,5],ymm5[6,7]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; AVX1-NEXT:    vbroadcastss (%rcx), %xmm3
+; AVX1-NEXT:    vbroadcastss (%rdx), %xmm5
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
 ; AVX1-NEXT:    vmovaps (%r8), %ymm3
-; AVX1-NEXT:    vmovaps (%rcx), %xmm13
-; AVX1-NEXT:    vmovaps (%rdx), %xmm14
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm14[1,2],xmm13[1,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm9
-; AVX1-NEXT:    vmovaps (%rsi), %xmm0
-; AVX1-NEXT:    vmovaps (%rdi), %xmm7
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm10
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5],ymm9[6,7]
-; AVX1-NEXT:    vbroadcastss 4(%r8), %xmm10
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3],ymm9[4,5,6,7]
-; AVX1-NEXT:    vbroadcastss 4(%r9), %ymm10
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3],ymm9[4,5,6,7]
-; AVX1-NEXT:    vunpcklps {{.*#+}} ymm10 = ymm8[0],ymm11[0],ymm8[1],ymm11[1],ymm8[4],ymm11[4],ymm8[5],ymm11[5]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[0,1,2,0,4,5,6,4]
-; AVX1-NEXT:    vextractf128 $1, %ymm12, %xmm12
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3],ymm10[4,5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm3[4,5],ymm10[6,7]
-; AVX1-NEXT:    vbroadcastss 16(%r9), %ymm12
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5],ymm10[6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} ymm11 = ymm8[2],ymm11[2],ymm8[3],ymm11[3],ymm8[6],ymm11[6],ymm8[7],ymm11[7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm8 = ymm5[1,2],ymm6[1,2],ymm5[5,6],ymm6[5,6]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[0,2,1,3,4,6,5,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5],ymm8[6,7]
-; AVX1-NEXT:    vbroadcastss 20(%r8), %xmm12
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3],ymm8[4,5,6,7]
-; AVX1-NEXT:    vbroadcastss 20(%r9), %ymm12
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm12[3],ymm8[4,5,6,7]
-; AVX1-NEXT:    vbroadcastss (%rcx), %xmm1
-; AVX1-NEXT:    vbroadcastss (%rdx), %xmm2
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, (%r8), %ymm0, %ymm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; AVX1-NEXT:    vbroadcastss (%r9), %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm6[3,0],ymm5[3,0],ymm6[7,4],ymm5[7,4]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm11[2,3],ymm1[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm3[2,3,2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[2,1,3,3,6,5,7,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5],ymm2[6,7]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3,2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6],ymm2[7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = mem[2,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5],ymm3[6,7]
-; AVX1-NEXT:    vmovaps (%r9), %xmm3
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm3[0,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6],ymm3[7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX1-NEXT:    vmovaps (%r9), %ymm5
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5],ymm2[6,7]
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm4 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm3 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm5 = ymm9[2],ymm11[2],ymm9[3],ymm11[3],ymm9[6],ymm11[6],ymm9[7],ymm11[7]
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm6 = ymm1[3,0],ymm12[3,0],ymm1[7,4],ymm12[7,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5],ymm3[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm12[1,2],ymm1[1,2],ymm12[5,6],ymm1[5,6]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm4[4,5],ymm8[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,2,1,3,4,6,5,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5],ymm1[6,7]
+; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    vmovaps %ymm1, 128(%rax)
+; AVX1-NEXT:    vmovaps %ymm3, 160(%rax)
 ; AVX1-NEXT:    vmovaps %ymm2, 64(%rax)
-; AVX1-NEXT:    vmovaps %ymm1, 160(%rax)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rax)
-; AVX1-NEXT:    vmovaps %ymm8, 128(%rax)
-; AVX1-NEXT:    vmovaps %ymm10, 96(%rax)
-; AVX1-NEXT:    vmovaps %ymm9, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm14, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm6, 96(%rax)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
-; AVX2-SLOW-LABEL: store_i32_stride6_vf8:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm12
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm3
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm4
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm13
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm14 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm15
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm15[0,1,2,2]
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm5
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm0
-; AVX2-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 4(%r9), %ymm7
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm6[0,1,2],ymm7[3],ymm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd (%rcx), %xmm6
-; AVX2-SLOW-NEXT:    vpbroadcastd (%rdx), %xmm7
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm6[2,3],ymm1[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm0, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm2
-; AVX2-SLOW-NEXT:    vpbroadcastd %xmm2, %ymm6
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3,4],ymm6[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm1 = ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[6],ymm12[6],ymm10[7],ymm12[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm4[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm3[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2],ymm6[3],ymm7[4],ymm6[5],ymm7[6],ymm6[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm1[4,5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm7 = mem[0],zero,mem[1],zero
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 20(%r9), %ymm7
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm6[0,1,2],ymm7[3],ymm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm6 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm6[2,3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm13[2,1,3,3,6,5,7,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3,4,5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = mem[0,2,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4,5,6],ymm6[7]
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm15[2],xmm5[3],xmm15[3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm14, %ymm5
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6],ymm2[7]
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} ymm2 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[4],ymm12[4],ymm10[5],ymm12[5]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm13[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 16(%r9), %ymm3
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 96(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 64(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 160(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm11, 128(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm9, (%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 32(%rax)
-; AVX2-SLOW-NEXT:    vzeroupper
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-ALL-LABEL: store_i32_stride6_vf8:
-; AVX2-FAST-ALL:       # %bb.0:
-; AVX2-FAST-ALL-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm11
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm3
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %ymm4
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %ymm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %ymm12
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} xmm13 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm8
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %xmm14
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm7 = xmm14[0,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm15
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm6 = xmm15[1,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2],xmm7[3]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5],ymm6[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %xmm7
-; AVX2-FAST-ALL-NEXT:    vpmovzxdq {{.*#+}} xmm8 = xmm7[0],zero,xmm7[1],zero
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3],ymm6[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 4(%r9), %ymm8
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm8 = ymm6[0,1,2],ymm8[3],ymm6[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd (%rcx), %xmm6
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd (%rdx), %xmm0
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm7, %ymm1
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd (%r9), %ymm1
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} ymm0 = ymm9[2],ymm11[2],ymm9[3],ymm11[3],ymm9[6],ymm11[6],ymm9[7],ymm11[7]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[0,1,2,2,4,5,6,6]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm5 = ymm3[1,1,2,3,5,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2],ymm1[3],ymm5[4],ymm1[5],ymm5[6],ymm1[7]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vpmovzxdq {{.*#+}} xmm5 = mem[0],zero,mem[1],zero
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm5[2,3],ymm1[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 20(%r9), %ymm5
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3],ymm1[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} ymm5 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-FAST-ALL-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = <6,u,u,u,u,u,7,u>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm2, %ymm5, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3,4,5],ymm5[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,6,u,u,u,u,u,7>
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm12, %ymm5, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4,5,6],ymm5[7]
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm5, %ymm13, %ymm5
-; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [2,2,3,3,2,2,3,3]
-; AVX2-FAST-ALL-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm2, %ymm6, %ymm7
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3,4,5],ymm7[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm12, %ymm6, %ymm6
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4,5,6],ymm6[7]
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} ymm4 = ymm9[0],ymm11[0],ymm9[1],ymm11[1],ymm9[4],ymm11[4],ymm9[5],ymm11[5]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 16(%r9), %ymm3
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm2, 96(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm5, 64(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, 160(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm1, 128(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm10, (%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm8, 32(%rax)
-; AVX2-FAST-ALL-NEXT:    vzeroupper
-; AVX2-FAST-ALL-NEXT:    retq
-;
-; AVX2-FAST-PERLANE-LABEL: store_i32_stride6_vf8:
-; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm12
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} xmm14 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm15
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm6 = xmm15[0,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5],ymm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 4(%r9), %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm6[0,1,2],ymm7[3],ymm6[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd (%rcx), %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd (%rdx), %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm6[2,3],ymm1[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd %xmm2, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3,4],ymm6[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} ymm1 = ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[6],ymm12[6],ymm10[7],ymm12[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm6 = ymm4[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm7 = ymm3[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2],ymm6[3],ymm7[4],ymm6[5],ymm7[6],ymm6[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm1[4,5],ymm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpmovzxdq {{.*#+}} xmm7 = mem[0],zero,mem[1],zero
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 20(%r9), %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm6[0,1,2],ymm7[3],ymm6[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} ymm6 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm6[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm6 = ymm13[2,1,3,3,6,5,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3,4,5],ymm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm6 = mem[0,2,2,3,4,6,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4,5,6],ymm6[7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm15[2],xmm5[3],xmm15[3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm5, %ymm14, %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6],ymm2[7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} ymm2 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[4],ymm12[4],ymm10[5],ymm12[5]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm13[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 16(%r9), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, 96(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 64(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 160(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm11, 128(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm9, (%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm8, 32(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vzeroupper
-; AVX2-FAST-PERLANE-NEXT:    retq
+; AVX2-LABEL: store_i32_stride6_vf8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX2-NEXT:    vmovdqa (%rsi), %ymm11
+; AVX2-NEXT:    vmovdqa (%rdx), %ymm12
+; AVX2-NEXT:    vmovaps (%r9), %xmm3
+; AVX2-NEXT:    vmovaps (%r8), %xmm4
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm9 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX2-NEXT:    vmovaps (%rcx), %xmm0
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm5 = xmm0[0,1,2,2]
+; AVX2-NEXT:    vmovaps (%rdx), %xmm1
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm6 = xmm1[1,1,2,3]
+; AVX2-NEXT:    vblendps {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm9[2,3],ymm5[4,5,6,7]
+; AVX2-NEXT:    vmovaps (%rsi), %xmm7
+; AVX2-NEXT:    vmovaps (%rdi), %xmm2
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm13 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
+; AVX2-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm6
+; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
+; AVX2-NEXT:    vbroadcastss (%rcx), %xmm5
+; AVX2-NEXT:    vbroadcastss (%rdx), %xmm6
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm14 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; AVX2-NEXT:    vmovdqa (%rcx), %ymm6
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
+; AVX2-NEXT:    vmovdqa (%r8), %ymm5
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm14[2,3],ymm2[4,5,6,7]
+; AVX2-NEXT:    vmovdqa (%r9), %ymm7
+; AVX2-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm2[0,1,2,3],ymm9[4,5],ymm2[6,7]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm1 = ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[6],ymm11[6],ymm10[7],ymm11[7]
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm3 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm3 = ymm12[2],ymm6[2],ymm12[3],ymm6[3],ymm12[6],ymm6[6],ymm12[7],ymm6[7]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
+; AVX2-NEXT:    vpbroadcastd 20(%r9), %ymm4
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm4 = ymm6[0,1,2,2,4,5,6,6]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm13 = ymm12[1,1,2,3,5,5,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm4 = ymm13[0],ymm4[1],ymm13[2],ymm4[3],ymm13[4],ymm4[5],ymm13[6],ymm4[7]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5],ymm3[6,7]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm12[0],ymm6[0],ymm12[1],ymm6[1],ymm12[4],ymm6[4],ymm12[5],ymm6[5]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm4 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[4],ymm11[4],ymm10[5],ymm11[5]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5,6,7]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm4 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
+; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    vmovdqa %ymm3, 96(%rax)
+; AVX2-NEXT:    vmovdqa %ymm1, 128(%rax)
+; AVX2-NEXT:    vmovdqa %ymm2, 160(%rax)
+; AVX2-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX2-NEXT:    vmovaps %ymm9, (%rax)
+; AVX2-NEXT:    vmovaps %ymm8, 32(%rax)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: store_i32_stride6_vf8:
 ; AVX512:       # %bb.0:
@@ -728,22 +535,22 @@ define void @store_i32_stride6_vf8(<8 x i32>* %in.vecptr0, <8 x i32>* %in.vecptr
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512-NEXT:    vmovdqa (%r8), %ymm2
-; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
 ; AVX512-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
-; AVX512-NEXT:    vinserti64x4 $1, (%r9), %zmm2, %zmm2
+; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,8,16,24,u,u,1,9,17,25,u,u,2,10,18,26>
 ; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm3
+; AVX512-NEXT:    vinserti64x4 $1, (%r9), %zmm2, %zmm2
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,16,24,6,7,8,9,17,25,12,13,14,15]
 ; AVX512-NEXT:    vpermi2d %zmm2, %zmm3, %zmm4
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,u,3,11,19,27,u,u,4,12,20,28,u,u,5,13>
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [18,26,2,3,4,5,19,27,8,9,10,11,20,28,14,15]
-; AVX512-NEXT:    vpermi2d %zmm2, %zmm3, %zmm5
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <5,13,u,u,22,30,6,14,u,u,23,31,7,15,u,u>
-; AVX512-NEXT:    vpermi2d %zmm0, %zmm1, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,21,29,4,5,6,7,22,30,10,11,12,13,23,31]
-; AVX512-NEXT:    vpermi2d %zmm2, %zmm3, %zmm0
-; AVX512-NEXT:    vmovdqu64 %zmm0, 128(%rax)
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <2,10,19,27,u,u,3,11,20,28,u,u,4,12,21,29>
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm2, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,19,27,6,7,8,9,20,28,12,13,14,15]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm3, %zmm5
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <5,13,21,29,u,u,6,14,22,30,u,u,7,15,23,31>
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,22,30,6,7,8,9,23,31,12,13,14,15]
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm3, %zmm1
+; AVX512-NEXT:    vmovdqu64 %zmm1, 128(%rax)
 ; AVX512-NEXT:    vmovdqu64 %zmm5, 64(%rax)
 ; AVX512-NEXT:    vmovdqu64 %zmm4, (%rax)
 ; AVX512-NEXT:    vzeroupper
@@ -772,156 +579,141 @@ define void @store_i32_stride6_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vec
 ; SSE-LABEL: store_i32_stride6_vf16:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    subq $72, %rsp
-; SSE-NEXT:    movaps (%rdi), %xmm0
-; SSE-NEXT:    movaps 16(%rdi), %xmm10
-; SSE-NEXT:    movaps (%rsi), %xmm11
-; SSE-NEXT:    movaps 16(%rsi), %xmm8
-; SSE-NEXT:    movaps (%rdx), %xmm13
-; SSE-NEXT:    movaps 16(%rdx), %xmm12
-; SSE-NEXT:    movaps (%rcx), %xmm3
-; SSE-NEXT:    movaps 16(%rcx), %xmm9
-; SSE-NEXT:    movaps (%r8), %xmm2
-; SSE-NEXT:    movaps 16(%r8), %xmm7
-; SSE-NEXT:    movaps (%r9), %xmm1
-; SSE-NEXT:    movaps 16(%r9), %xmm14
-; SSE-NEXT:    movaps %xmm13, %xmm4
-; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE-NEXT:    movaps %xmm0, %xmm5
-; SSE-NEXT:    unpcklps {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
-; SSE-NEXT:    movaps %xmm1, %xmm6
-; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0],xmm5[2,3]
+; SSE-NEXT:    movapd (%rdi), %xmm12
+; SSE-NEXT:    movapd 16(%rdi), %xmm10
+; SSE-NEXT:    movapd (%rsi), %xmm13
+; SSE-NEXT:    movapd 16(%rsi), %xmm8
+; SSE-NEXT:    movapd (%rdx), %xmm4
+; SSE-NEXT:    movapd 16(%rdx), %xmm2
+; SSE-NEXT:    movapd (%rcx), %xmm5
+; SSE-NEXT:    movapd 16(%rcx), %xmm9
+; SSE-NEXT:    movapd (%r8), %xmm7
+; SSE-NEXT:    movapd 16(%r8), %xmm14
+; SSE-NEXT:    movapd (%r9), %xmm0
+; SSE-NEXT:    movapd 16(%r9), %xmm11
+; SSE-NEXT:    movapd %xmm4, %xmm1
+; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE-NEXT:    movapd %xmm12, %xmm3
+; SSE-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1]
+; SSE-NEXT:    movapd %xmm3, %xmm6
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm6 = xmm6[0],xmm1[0]
+; SSE-NEXT:    movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movapd %xmm7, %xmm6
+; SSE-NEXT:    unpcklps {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
+; SSE-NEXT:    movsd {{.*#+}} xmm3 = xmm6[0],xmm3[1]
+; SSE-NEXT:    movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movhlps {{.*#+}} xmm6 = xmm1[1],xmm6[1]
 ; SSE-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm4[0]
-; SSE-NEXT:    movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm2, %xmm5
-; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,3],xmm5[0,2]
-; SSE-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm3, %xmm5
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm13[1]
-; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm11[2],xmm0[3],xmm11[3]
-; SSE-NEXT:    movaps %xmm2, %xmm4
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[2,3]
-; SSE-NEXT:    movaps %xmm4, (%rsp) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,0]
-; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
-; SSE-NEXT:    unpckhps {{.*#+}} xmm13 = xmm13[2],xmm3[2],xmm13[3],xmm3[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[2,3],xmm2[0,2]
-; SSE-NEXT:    movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm12, %xmm1
-; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
-; SSE-NEXT:    movaps %xmm10, %xmm0
-; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
-; SSE-NEXT:    movaps %xmm14, %xmm2
-; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[2,3]
-; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm7, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm14[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,3],xmm2[0,2]
-; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm9, %xmm2
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm12[1]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
+; SSE-NEXT:    movapd %xmm12, %xmm1
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE-NEXT:    movapd %xmm1, (%rsp) # 16-byte Spill
+; SSE-NEXT:    unpckhps {{.*#+}} xmm7 = xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; SSE-NEXT:    movsd {{.*#+}} xmm12 = xmm7[0],xmm12[1]
+; SSE-NEXT:    movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movhlps {{.*#+}} xmm7 = xmm4[1],xmm7[1]
+; SSE-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movapd %xmm2, %xmm0
+; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
+; SSE-NEXT:    movapd %xmm10, %xmm3
+; SSE-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; SSE-NEXT:    movapd %xmm3, %xmm1
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movapd %xmm14, %xmm1
+; SSE-NEXT:    movapd %xmm14, %xmm13
+; SSE-NEXT:    unpcklps {{.*#+}} xmm13 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
+; SSE-NEXT:    movsd {{.*#+}} xmm3 = xmm13[0],xmm3[1]
+; SSE-NEXT:    movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movhlps {{.*#+}} xmm13 = xmm0[1],xmm13[1]
+; SSE-NEXT:    movapd 32(%rdi), %xmm14
+; SSE-NEXT:    unpckhps {{.*#+}} xmm2 = xmm2[2],xmm9[2],xmm2[3],xmm9[3]
+; SSE-NEXT:    movapd 32(%rdx), %xmm0
 ; SSE-NEXT:    unpckhps {{.*#+}} xmm10 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
-; SSE-NEXT:    movaps %xmm7, %xmm0
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm14[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm10[2,3]
-; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,1],xmm2[2,0]
-; SSE-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps 32(%rdi), %xmm10
-; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,3],xmm14[3,3]
-; SSE-NEXT:    movaps 32(%rdx), %xmm11
-; SSE-NEXT:    unpckhps {{.*#+}} xmm12 = xmm12[2],xmm9[2],xmm12[3],xmm9[3]
-; SSE-NEXT:    movaps 32(%rcx), %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[2,3],xmm7[0,2]
-; SSE-NEXT:    movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm11, %xmm12
-; SSE-NEXT:    unpcklps {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
-; SSE-NEXT:    movaps 32(%rsi), %xmm1
-; SSE-NEXT:    movaps %xmm10, %xmm13
-; SSE-NEXT:    unpcklps {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1]
-; SSE-NEXT:    movaps 32(%r8), %xmm2
-; SSE-NEXT:    movaps 32(%r9), %xmm8
-; SSE-NEXT:    movaps %xmm8, %xmm15
-; SSE-NEXT:    movlhps {{.*#+}} xmm15 = xmm15[0],xmm2[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[2,0],xmm13[2,3]
-; SSE-NEXT:    movlhps {{.*#+}} xmm13 = xmm13[0],xmm12[0]
-; SSE-NEXT:    movaps %xmm2, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm8[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[2,3],xmm3[0,2]
-; SSE-NEXT:    unpckhps {{.*#+}} xmm10 = xmm10[2],xmm1[2],xmm10[3],xmm1[3]
-; SSE-NEXT:    movaps %xmm0, %xmm1
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm11[1]
-; SSE-NEXT:    movaps %xmm2, %xmm9
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm8[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,2],xmm10[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,1],xmm1[2,0]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3],xmm8[3,3]
-; SSE-NEXT:    unpckhps {{.*#+}} xmm11 = xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[2,3],xmm2[0,2]
-; SSE-NEXT:    movaps 48(%rdx), %xmm2
-; SSE-NEXT:    movaps 48(%rcx), %xmm8
-; SSE-NEXT:    movaps %xmm2, %xmm4
-; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
-; SSE-NEXT:    movaps 48(%rdi), %xmm0
-; SSE-NEXT:    movaps 48(%rsi), %xmm14
-; SSE-NEXT:    movaps %xmm0, %xmm5
-; SSE-NEXT:    unpcklps {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1]
-; SSE-NEXT:    movaps 48(%r8), %xmm3
-; SSE-NEXT:    movaps 48(%r9), %xmm7
-; SSE-NEXT:    movaps %xmm7, %xmm6
-; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm3[0]
-; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0],xmm5[2,3]
-; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm4[0]
-; SSE-NEXT:    movaps %xmm3, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm7[1,1]
-; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,3],xmm1[0,2]
-; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
-; SSE-NEXT:    movaps %xmm8, %xmm14
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm2[1]
-; SSE-NEXT:    movaps %xmm3, %xmm1
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm7[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm14[2,0]
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3],xmm7[3,3]
-; SSE-NEXT:    unpckhps {{.*#+}} xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[2,3],xmm3[0,2]
+; SSE-NEXT:    movapd 32(%rcx), %xmm3
+; SSE-NEXT:    unpckhps {{.*#+}} xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3]
+; SSE-NEXT:    movapd %xmm10, %xmm4
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm4 = xmm4[0],xmm2[0]
+; SSE-NEXT:    movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movsd {{.*#+}} xmm10 = xmm1[0],xmm10[1]
+; SSE-NEXT:    movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
+; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movapd %xmm0, %xmm2
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT:    movapd 32(%rsi), %xmm1
+; SSE-NEXT:    movapd %xmm14, %xmm8
+; SSE-NEXT:    unpcklps {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
+; SSE-NEXT:    movapd %xmm8, %xmm4
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm4 = xmm4[0],xmm2[0]
+; SSE-NEXT:    movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movapd 32(%r8), %xmm4
+; SSE-NEXT:    movapd 32(%r9), %xmm6
+; SSE-NEXT:    movapd %xmm4, %xmm5
+; SSE-NEXT:    unpcklps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; SSE-NEXT:    movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1]
+; SSE-NEXT:    movhlps {{.*#+}} xmm5 = xmm2[1],xmm5[1]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm14 = xmm14[2],xmm1[2],xmm14[3],xmm1[3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm4 = xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE-NEXT:    movapd %xmm14, %xmm15
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm15 = xmm15[0],xmm0[0]
+; SSE-NEXT:    movsd {{.*#+}} xmm14 = xmm4[0],xmm14[1]
+; SSE-NEXT:    movhlps {{.*#+}} xmm4 = xmm0[1],xmm4[1]
+; SSE-NEXT:    movapd 48(%rdx), %xmm3
+; SSE-NEXT:    movapd 48(%rcx), %xmm10
+; SSE-NEXT:    movapd %xmm3, %xmm6
+; SSE-NEXT:    unpcklps {{.*#+}} xmm6 = xmm6[0],xmm10[0],xmm6[1],xmm10[1]
+; SSE-NEXT:    movapd 48(%rdi), %xmm2
+; SSE-NEXT:    movapd 48(%rsi), %xmm12
+; SSE-NEXT:    movapd %xmm2, %xmm7
+; SSE-NEXT:    unpcklps {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
+; SSE-NEXT:    movapd %xmm7, %xmm9
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm9 = xmm9[0],xmm6[0]
+; SSE-NEXT:    movapd 48(%r8), %xmm0
+; SSE-NEXT:    movapd 48(%r9), %xmm11
+; SSE-NEXT:    movapd %xmm0, %xmm1
+; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
+; SSE-NEXT:    movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1]
+; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm6[1],xmm1[1]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm2 = xmm2[2],xmm12[2],xmm2[3],xmm12[3]
+; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; SSE-NEXT:    movapd %xmm2, %xmm6
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm6 = xmm6[0],xmm3[0]
+; SSE-NEXT:    movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm3[1],xmm0[1]
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    movaps %xmm2, 368(%rax)
-; SSE-NEXT:    movaps %xmm1, 352(%rax)
-; SSE-NEXT:    movaps %xmm0, 336(%rax)
-; SSE-NEXT:    movaps %xmm4, 320(%rax)
-; SSE-NEXT:    movaps %xmm6, 304(%rax)
-; SSE-NEXT:    movaps %xmm5, 288(%rax)
-; SSE-NEXT:    movaps %xmm11, 272(%rax)
-; SSE-NEXT:    movaps %xmm9, 256(%rax)
-; SSE-NEXT:    movaps %xmm10, 240(%rax)
-; SSE-NEXT:    movaps %xmm12, 224(%rax)
-; SSE-NEXT:    movaps %xmm15, 208(%rax)
-; SSE-NEXT:    movaps %xmm13, 192(%rax)
+; SSE-NEXT:    movaps %xmm0, 368(%rax)
+; SSE-NEXT:    movapd %xmm2, 352(%rax)
+; SSE-NEXT:    movapd %xmm6, 336(%rax)
+; SSE-NEXT:    movaps %xmm1, 320(%rax)
+; SSE-NEXT:    movapd %xmm7, 304(%rax)
+; SSE-NEXT:    movapd %xmm9, 288(%rax)
+; SSE-NEXT:    movaps %xmm4, 272(%rax)
+; SSE-NEXT:    movapd %xmm14, 256(%rax)
+; SSE-NEXT:    movapd %xmm15, 240(%rax)
+; SSE-NEXT:    movaps %xmm5, 224(%rax)
+; SSE-NEXT:    movapd %xmm8, 208(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 192(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 176(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 160(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 144(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 128(%rax)
+; SSE-NEXT:    movaps %xmm13, 128(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 112(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 96(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 80(%rax)
-; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 64(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 64(%rax)
+; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 48(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 32(%rax)
@@ -934,791 +726,416 @@ define void @store_i32_stride6_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vec
 ;
 ; AVX1-LABEL: store_i32_stride6_vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $136, %rsp
-; AVX1-NEXT:    vmovaps (%rdi), %ymm8
-; AVX1-NEXT:    vmovaps 32(%rdi), %ymm4
-; AVX1-NEXT:    vmovaps (%rsi), %ymm5
-; AVX1-NEXT:    vmovaps 32(%rsi), %ymm2
-; AVX1-NEXT:    vmovaps (%rdx), %ymm15
-; AVX1-NEXT:    vmovaps 32(%rdx), %ymm10
-; AVX1-NEXT:    vmovaps 32(%rcx), %ymm13
-; AVX1-NEXT:    vmovaps 32(%r8), %ymm3
-; AVX1-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[4],ymm2[4],ymm4[5],ymm2[5]
+; AVX1-NEXT:    subq $312, %rsp # imm = 0x138
+; AVX1-NEXT:    vmovaps (%rdi), %ymm2
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 32(%rsi), %ymm0
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 32(%rdx), %ymm12
+; AVX1-NEXT:    vmovaps 32(%rcx), %ymm6
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm10[0],ymm13[2],ymm10[2]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm6[0],ymm12[0],ymm6[2],ymm12[2]
 ; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
-; AVX1-NEXT:    vbroadcastss 48(%r9), %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovaps 32(%rcx), %xmm11
-; AVX1-NEXT:    vmovaps 32(%rdx), %xmm7
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm7[1,2],xmm11[1,2]
+; AVX1-NEXT:    vmovaps 32(%rcx), %xmm1
+; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovaps 32(%rdx), %xmm0
+; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps 32(%rsi), %xmm1
-; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm3
-; AVX1-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm6 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX1-NEXT:    vbroadcastss 36(%r8), %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vbroadcastss 36(%r9), %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovaps 32(%r9), %xmm15
+; AVX1-NEXT:    vmovaps 32(%r8), %xmm13
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovaps 32(%rsi), %xmm0
+; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm14
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm9 = xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm8
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm8[4,5],ymm7[6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm8[0],ymm5[0],ymm8[1],ymm5[1],ymm8[4],ymm5[4],ymm8[5],ymm5[5]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX1-NEXT:    vmovaps (%rcx), %ymm9
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm9[0],ymm15[0],ymm9[2],ymm15[2]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vmovaps (%r8), %ymm1
-; AVX1-NEXT:    vmovups %ymm1, (%rsp) # 32-byte Spill
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX1-NEXT:    vbroadcastss 16(%r9), %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
+; AVX1-NEXT:    vmovaps (%rsi), %ymm0
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm7 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm7[2,3,2,3]
+; AVX1-NEXT:    vmovaps (%rdx), %ymm7
+; AVX1-NEXT:    vmovaps (%rcx), %ymm8
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vextractf128 $1, %ymm11, %xmm11
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT:    vmovaps (%rcx), %xmm0
 ; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovaps (%rdx), %xmm1
 ; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[1,2],xmm0[1,2]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vmovaps (%rsi), %xmm3
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm1[1,2],xmm0[1,2]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[0,2,1,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm4, %ymm4
+; AVX1-NEXT:    vmovaps (%r9), %xmm11
+; AVX1-NEXT:    vmovaps (%r8), %xmm10
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm5 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vmovaps (%rsi), %xmm2
 ; AVX1-NEXT:    vmovaps (%rdi), %xmm1
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm14 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm12
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5],ymm0[6,7]
-; AVX1-NEXT:    vbroadcastss 4(%r8), %xmm12
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm12[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vbroadcastss 4(%r9), %ymm12
-; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} ymm2 = ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[6],ymm2[6],ymm4[7],ymm2[7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm10[1,2],ymm13[1,2],ymm10[5,6],ymm13[5,6]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
-; AVX1-NEXT:    vbroadcastss 52(%r8), %xmm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vbroadcastss 52(%r9), %ymm4
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpckhps {{.*#+}} ymm4 = ymm8[2],ymm5[2],ymm8[3],ymm5[3],ymm8[6],ymm5[6],ymm8[7],ymm5[7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm15[1,2],ymm9[1,2],ymm15[5,6],ymm9[5,6]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm4
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
+; AVX1-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX1-NEXT:    vbroadcastss (%rcx), %xmm1
+; AVX1-NEXT:    vbroadcastss (%rdx), %xmm3
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vmovaps 32(%r8), %ymm0
+; AVX1-NEXT:    vmovaps 32(%r9), %ymm1
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm4 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
-; AVX1-NEXT:    vbroadcastss 20(%r8), %xmm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vbroadcastss 20(%r9), %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm0[0,1,2],ymm5[3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vbroadcastss (%rcx), %xmm0
-; AVX1-NEXT:    vbroadcastss (%rdx), %xmm5
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, (%r8), %ymm1, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX1-NEXT:    vbroadcastss (%r9), %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm13[3,0],ymm10[3,0],ymm13[7,4],ymm10[7,4]
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm6[3,0],ymm12[3,0],ymm6[7,4],ymm12[7,4]
 ; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
-; AVX1-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm2 = mem[2,3,2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[2,1,3,3,6,5,7,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5],ymm2[6,7]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3,2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6],ymm2[7]
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm11[2],xmm7[3],xmm11[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm6, %ymm1
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = mem[2,1,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5],ymm3[6,7]
-; AVX1-NEXT:    vmovaps 32(%r9), %xmm3
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm3[0,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5,6],ymm3[7]
-; AVX1-NEXT:    vbroadcastss 32(%rcx), %xmm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm12[1,2],ymm6[1,2],ymm12[5,6],ymm6[5,6]
+; AVX1-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm2 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm2 = mem[0,1,2,3],ymm3[4,5],mem[6,7]
+; AVX1-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm13[2],xmm15[2],xmm13[3],xmm15[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm9[2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm3 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm3 = xmm2[2],mem[2],xmm2[3],mem[3]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
+; AVX1-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm3 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm3 = xmm14[0],mem[0],xmm14[1],mem[1]
+; AVX1-NEXT:    vbroadcastss 32(%rcx), %xmm4
 ; AVX1-NEXT:    vbroadcastss 32(%rdx), %xmm5
-; AVX1-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3],ymm6[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, 32(%r8), %ymm5, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,3],ymm5[4,5],ymm1[6,7]
-; AVX1-NEXT:    vbroadcastss 32(%r9), %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm5[5],ymm1[6,7]
-; AVX1-NEXT:    vshufps {{.*#+}} ymm5 = ymm9[3,0],ymm15[3,0],ymm9[7,4],ymm15[7,4]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm5[2,3]
-; AVX1-NEXT:    vperm2f128 $51, (%rsp), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm5 = mem[2,3,2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[2,1,3,3,6,5,7,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3,4,5],ymm5[6,7]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = mem[2,3,2,3]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,2,2,3,4,6,6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4,5,6],ymm5[7]
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3]
-; AVX1-NEXT:    vmovaps (%r9), %xmm6
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm14, %ymm5
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = mem[2,1,3,3]
+; AVX1-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
+; AVX1-NEXT:    vmovaps (%r8), %ymm4
+; AVX1-NEXT:    vmovaps (%r9), %ymm5
+; AVX1-NEXT:    vunpcklps {{.*#+}} ymm9 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX1-NEXT:    vunpckhps {{.*#+}} ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm5 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm12
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm12[2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm12 = ymm8[3,0],ymm7[3,0],ymm8[7,4],ymm7[7,4]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[2,0,2,3,6,4,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm12[4,5],ymm4[6,7]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[1,2],ymm8[1,2],ymm7[5,6],ymm8[5,6]
+; AVX1-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm8 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm8 = mem[0,1,2,3],ymm9[4,5],mem[6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm9
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[0,2,1,3,4,6,5,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm9[2,3],ymm7[4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5],ymm7[6,7]
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm7 = xmm10[2],xmm11[2],xmm10[3],xmm11[3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm7, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3,4,5],ymm7[6,7]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = xmm6[0,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4,5,6],ymm6[7]
+; AVX1-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX1-NEXT:    # ymm7 = ymm7[0,1],mem[2,3],ymm7[4,5,6,7]
+; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5],ymm7[6,7]
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovaps %ymm5, 64(%rax)
+; AVX1-NEXT:    vmovaps %ymm2, 64(%rax)
+; AVX1-NEXT:    vmovaps %ymm5, 128(%rax)
 ; AVX1-NEXT:    vmovaps %ymm4, 160(%rax)
-; AVX1-NEXT:    vmovaps %ymm1, 192(%rax)
-; AVX1-NEXT:    vmovaps %ymm3, 256(%rax)
-; AVX1-NEXT:    vmovaps %ymm2, 352(%rax)
-; AVX1-NEXT:    vmovaps %ymm0, (%rax)
-; AVX1-NEXT:    vmovaps %ymm8, 128(%rax)
+; AVX1-NEXT:    vmovaps %ymm3, 192(%rax)
+; AVX1-NEXT:    vmovaps %ymm0, 256(%rax)
+; AVX1-NEXT:    vmovaps %ymm6, 320(%rax)
+; AVX1-NEXT:    vmovaps %ymm1, 352(%rax)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm0, 320(%rax)
-; AVX1-NEXT:    vmovaps %ymm12, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm0, (%rax)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm0, 96(%rax)
+; AVX1-NEXT:    vmovaps %ymm0, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm8, 96(%rax)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm0, 224(%rax)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm0, 288(%rax)
-; AVX1-NEXT:    addq $136, %rsp
+; AVX1-NEXT:    addq $312, %rsp # imm = 0x138
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
-; AVX2-SLOW-LABEL: store_i32_stride6_vf16:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    subq $200, %rsp
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm8
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm10
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm6
-; AVX2-SLOW-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm4
-; AVX2-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,2]
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX2-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[1,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2],xmm4[3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm12
-; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %xmm11
-; AVX2-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm11[0],zero,xmm11[1],zero
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 36(%r9), %ymm4
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[0,1,2,2]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[1,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2],xmm4[3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm12[0],zero,xmm12[1],zero
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 4(%r9), %ymm4
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpbroadcastd (%rcx), %xmm3
-; AVX2-SLOW-NEXT:    vpbroadcastd (%rdx), %xmm4
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm5
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm12, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm15
-; AVX2-SLOW-NEXT:    vpbroadcastd %xmm15, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm3
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm3[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm5[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm9
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm7
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm13 = ymm9[2],ymm7[2],ymm9[3],ymm7[3],ymm9[6],ymm7[6],ymm9[7],ymm7[7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm13[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3],ymm2[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 52(%r9), %ymm4
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm4[3],ymm2[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpbroadcastd 32(%rcx), %xmm2
-; AVX2-SLOW-NEXT:    vpbroadcastd 32(%rdx), %xmm4
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastq %xmm11, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%r9), %xmm6
-; AVX2-SLOW-NEXT:    vpbroadcastd %xmm6, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm2[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2],ymm4[3],ymm8[4],ymm4[5],ymm8[6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm4[2,1,2,3]
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm8
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm4 = ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[6],ymm8[6],ymm10[7],ymm8[7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm14 = mem[0],zero,mem[1],zero
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3],ymm0[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 20(%r9), %ymm14
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3],ymm0[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm0 = ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[6],ymm3[6],ymm5[7],ymm3[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3]
-; AVX2-SLOW-NEXT:    vmovdqa 32(%r8), %ymm14
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm14[2,1,3,3,6,5,7,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3,4,5],ymm13[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = mem[0,2,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm0[0],ymm13[1],ymm0[2,3,4,5,6],ymm13[7]
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[4],ymm3[4],ymm5[5],ymm3[5]
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm9[0],ymm7[0],ymm9[1],ymm7[1],ymm9[4],ymm7[4],ymm9[5],ymm7[5]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 48(%r9), %ymm3
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm3
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm11[2,2,3,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5],ymm5[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm6[2,2,3,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4,5,6],ymm5[7]
-; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm5 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm5[2,3]
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm5
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm5[2,1,3,3,6,5,7,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3,4,5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = mem[0,2,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3,4,5,6],ymm6[7]
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
-; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} ymm2 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[4],ymm8[4],ymm10[5],ymm8[5]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vpbroadcastd 16(%r9), %ymm2
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm5, %ymm2
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm12[2,2,3,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3,4,5],ymm5[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm15[2,2,3,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3,4,5,6],ymm5[7]
-; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 64(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 96(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 160(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 256(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 288(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm13, 352(%rax)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 128(%rax)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 192(%rax)
-; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 320(%rax)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, (%rax)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 224(%rax)
-; AVX2-SLOW-NEXT:    addq $200, %rsp
-; AVX2-SLOW-NEXT:    vzeroupper
-; AVX2-SLOW-NEXT:    retq
-;
-; AVX2-FAST-ALL-LABEL: store_i32_stride6_vf16:
-; AVX2-FAST-ALL:       # %bb.0:
-; AVX2-FAST-ALL-NEXT:    subq $184, %rsp
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %xmm9
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %xmm10
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %xmm8
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %xmm7
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rcx), %xmm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX2-FAST-ALL-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdx), %xmm11
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm5 = xmm11[1,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2],xmm4[3]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm0[4,5],ymm4[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %xmm5
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r8), %xmm0
-; AVX2-FAST-ALL-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3],ymm4[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 36(%r9), %ymm6
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2],ymm6[3],ymm4[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm2[2],xmm9[2],xmm2[3],xmm9[3]
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm4
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm6 = xmm7[0,1,2,2]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2],xmm6[3]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
-; AVX2-FAST-ALL-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm5[0],zero,xmm5[1],zero
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 4(%r9), %ymm4
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd (%rcx), %xmm3
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd (%rdx), %xmm4
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdx), %ymm13
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rcx), %ymm14
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm5, %ymm2
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd (%r9), %ymm2
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[0,1,2,2,4,5,6,6]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm2 = ymm13[1,1,2,3,5,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %ymm4
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rsi), %ymm2
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} ymm6 = ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[6],ymm2[6],ymm4[7],ymm2[7]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 52(%r9), %ymm3
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 32(%rcx), %xmm1
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 32(%rdx), %xmm3
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm8[0],xmm10[0],xmm8[1],xmm10[1]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3],ymm3[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastq %xmm0, %ymm0
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 32(%r9), %ymm1
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdx), %ymm7
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm1 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[1,1,2,3,5,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm10 = ymm1[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm5
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} ymm1 = ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[6],ymm3[6],ymm5[7],ymm3[7]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm1[4,5],ymm10[6,7]
-; AVX2-FAST-ALL-NEXT:    vpmovzxdq {{.*#+}} xmm15 = mem[0],zero,mem[1],zero
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm15[2,3],ymm10[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 20(%r9), %ymm15
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2],ymm15[3],ymm10[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} ymm15 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[2,3,2,3,6,7,6,7]
-; AVX2-FAST-ALL-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm6[2,3],ymm15[2,3]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r8), %ymm15
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm9 = [6,5,3,3,6,5,7,7]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm15, %ymm9, %ymm12
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm6 = ymm12[0,1],ymm6[2,3,4,5],ymm12[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%r9), %ymm12
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm8 = [4,6,2,3,4,6,6,7]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm12, %ymm8, %ymm10
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm10[1],ymm6[2,3,4,5,6],ymm10[7]
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} ymm10 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5]
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} ymm2 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[4],ymm2[4],ymm4[5],ymm2[5]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm4 = ymm10[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3],ymm2[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5],ymm2[6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 48(%r9), %ymm4
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5],ymm2[6,7]
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm4 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm4 = xmm11[2],mem[2],xmm11[3],mem[3]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm4, %ymm10, %ymm4
-; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [2,2,3,3,2,2,3,3]
-; AVX2-FAST-ALL-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm15, %ymm10, %ymm11
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0,1],ymm4[2,3,4,5],ymm11[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm12, %ymm10, %ymm11
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm11[1],ymm4[2,3,4,5,6],ymm11[7]
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{.*#+}} ymm11 = ymm7[2],ymm0[2],ymm7[3],ymm0[3],ymm7[6],ymm0[6],ymm7[7],ymm0[7]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[2,3,2,3,6,7,6,7]
-; AVX2-FAST-ALL-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm11[2,3]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r8), %ymm11
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm11, %ymm9, %ymm9
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm1[2,3,4,5],ymm9[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa (%r9), %ymm9
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm9, %ymm8, %ymm8
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3,4,5,6],ymm8[7]
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[1],ymm0[1],ymm7[4],ymm0[4],ymm7[5],ymm0[5]
-; AVX2-FAST-ALL-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[4],ymm3[4],ymm5[5],ymm3[5]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
-; AVX2-FAST-ALL-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vpbroadcastd 16(%r9), %ymm3
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
-; AVX2-FAST-ALL-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX2-FAST-ALL-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX2-FAST-ALL-NEXT:    # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
-; AVX2-FAST-ALL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX2-FAST-ALL-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm3
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm11, %ymm10, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5],ymm5[6,7]
-; AVX2-FAST-ALL-NEXT:    vpermd %ymm9, %ymm10, %ymm5
-; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4,5,6],ymm5[7]
-; AVX2-FAST-ALL-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm3, 64(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, 96(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm1, 160(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm4, 256(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm2, 288(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm6, 352(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 128(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 192(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 320(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, (%rax)
-; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-FAST-ALL-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-ALL-NEXT:    vmovaps %ymm0, 224(%rax)
-; AVX2-FAST-ALL-NEXT:    addq $184, %rsp
-; AVX2-FAST-ALL-NEXT:    vzeroupper
-; AVX2-FAST-ALL-NEXT:    retq
-;
-; AVX2-FAST-PERLANE-LABEL: store_i32_stride6_vf16:
-; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    subq $200, %rsp
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm10
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[1,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2],xmm4[3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %xmm12
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %xmm11
-; AVX2-FAST-PERLANE-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm11[0],zero,xmm11[1],zero
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 36(%r9), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[0,1,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[1,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2],xmm4[3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm12[0],zero,xmm12[1],zero
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 4(%r9), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd (%rcx), %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd (%rdx), %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdx), %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm12, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r9), %xmm15
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd %xmm15, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rcx), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm3[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm4 = ymm5[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rsi), %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} ymm13 = ymm9[2],ymm7[2],ymm9[3],ymm7[3],ymm9[6],ymm7[6],ymm9[7],ymm7[7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm13[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3],ymm2[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 52(%r9), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm4[3],ymm2[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 32(%rcx), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 32(%rdx), %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm11, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r9), %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd %xmm6, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdx), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm8 = ymm2[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2],ymm4[3],ymm8[4],ymm4[5],ymm8[6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm4[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rsi), %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} ymm4 = ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[6],ymm8[6],ymm10[7],ymm8[7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpmovzxdq {{.*#+}} xmm14 = mem[0],zero,mem[1],zero
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3],ymm0[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 20(%r9), %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3],ymm0[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} ymm0 = ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[6],ymm3[6],ymm5[7],ymm3[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%r8), %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm13 = ymm14[2,1,3,3,6,5,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3,4,5],ymm13[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm13 = mem[0,2,2,3,4,6,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm13 = ymm0[0],ymm13[1],ymm0[2,3,4,5,6],ymm13[7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[4],ymm3[4],ymm5[5],ymm3[5]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm9[0],ymm7[0],ymm9[1],ymm7[1],ymm9[4],ymm7[4],ymm9[5],ymm7[5]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 48(%r9), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm5, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm11[2,2,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5],ymm5[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm6[2,2,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4,5,6],ymm5[7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{.*#+}} ymm5 = ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[6],ymm1[6],ymm2[7],ymm1[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm5[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%r8), %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm6 = ymm5[2,1,3,3,6,5,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3,4,5],ymm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm6 = mem[0,2,2,3,4,6,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3,4,5,6],ymm6[7]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
-; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} ymm2 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[4],ymm8[4],ymm10[5],ymm8[5]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 16(%r9), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm5, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm12[2,2,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3,4,5],ymm5[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm15[2,2,3,3]
-; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3,4,5,6],ymm5[7]
-; AVX2-FAST-PERLANE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, 64(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, 96(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, 160(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm3, 256(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 288(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm13, 352(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 128(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 192(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 320(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, (%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, 224(%rax)
-; AVX2-FAST-PERLANE-NEXT:    addq $200, %rsp
-; AVX2-FAST-PERLANE-NEXT:    vzeroupper
-; AVX2-FAST-PERLANE-NEXT:    retq
+; AVX2-LABEL: store_i32_stride6_vf16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    subq $232, %rsp
+; AVX2-NEXT:    vmovaps (%r9), %xmm13
+; AVX2-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT:    vmovdqa 32(%r9), %xmm12
+; AVX2-NEXT:    vmovaps (%r8), %xmm14
+; AVX2-NEXT:    vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT:    vmovdqa 32(%r8), %xmm10
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
+; AVX2-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vmovaps (%rcx), %xmm6
+; AVX2-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT:    vmovdqa 32(%rcx), %xmm8
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm8[0,1,2,2]
+; AVX2-NEXT:    vmovaps (%rdx), %xmm7
+; AVX2-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT:    vmovdqa 32(%rdx), %xmm4
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm4[1,1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vmovaps (%rsi), %xmm11
+; AVX2-NEXT:    vmovdqa 32(%rsi), %xmm5
+; AVX2-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm9
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5],ymm0[6,7]
+; AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm6[0,1,2,2]
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm6 = xmm7[1,1,2,3]
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm6[0],xmm0[1],xmm6[2],xmm0[3]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm6 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vmovaps (%rdi), %xmm1
+; AVX2-NEXT:    vunpckhps {{.*#+}} xmm7 = xmm1[2],xmm11[2],xmm1[3],xmm11[3]
+; AVX2-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm9
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5],ymm0[6,7]
+; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vbroadcastss (%rcx), %xmm0
+; AVX2-NEXT:    vbroadcastss (%rdx), %xmm7
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
+; AVX2-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
+; AVX2-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm1
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vmovdqa 32(%r8), %ymm0
+; AVX2-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX2-NEXT:    vmovdqa 32(%r9), %ymm11
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm11[2],ymm0[3],ymm11[3],ymm0[6],ymm11[6],ymm0[7],ymm11[7]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX2-NEXT:    vmovdqa 32(%rdi), %ymm13
+; AVX2-NEXT:    vmovdqa 32(%rsi), %ymm14
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm6 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
+; AVX2-NEXT:    vextracti128 $1, %ymm6, %xmm1
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm7 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vmovdqa 32(%rdx), %ymm1
+; AVX2-NEXT:    vmovdqa 32(%rcx), %ymm0
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm15 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[2,3,2,3,6,7,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm15[4,5],ymm7[6,7]
+; AVX2-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm7 = mem[0],zero,mem[1],zero
+; AVX2-NEXT:    vpbroadcastd 52(%r9), %ymm15
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0,1,2],xmm15[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm15 = ymm0[0,1,2,2,4,5,6,6]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm9 = ymm1[1,1,2,3,5,5,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0],ymm15[1],ymm9[2],ymm15[3],ymm9[4],ymm15[5],ymm9[6],ymm15[7]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm7[2,3],ymm9[4,5,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
+; AVX2-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5,6,7]
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5],ymm2[6,7]
+; AVX2-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vpbroadcastd 32(%rcx), %xmm2
+; AVX2-NEXT:    vpbroadcastd 32(%rdx), %xmm6
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
+; AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 16-byte Folded Reload
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vmovdqa (%r8), %ymm3
+; AVX2-NEXT:    vmovdqa (%r9), %ymm5
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm6 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm7
+; AVX2-NEXT:    vmovdqa (%rsi), %ymm8
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm9 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7]
+; AVX2-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm10[2,3],ymm6[4,5,6,7]
+; AVX2-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX2-NEXT:    vmovdqa (%rcx), %ymm12
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm15 = ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[6],ymm12[6],ymm10[7],ymm12[7]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[2,3,2,3,6,7,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm15[4,5],ymm6[6,7]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm6 = mem[0],zero,mem[1],zero
+; AVX2-NEXT:    vpbroadcastd 20(%r9), %ymm4
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm6 = ymm12[0,1,2,2,4,5,6,6]
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm10[1,1,2,3,5,5,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2],ymm6[3],ymm2[4],ymm6[5],ymm2[6],ymm6[7]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3],ymm2[4,5,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5],ymm2[6,7]
+; AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX2-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX2-NEXT:    # xmm4 = xmm4[2],mem[2],xmm4[3],mem[3]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
+; AVX2-NEXT:    vpblendd $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX2-NEXT:    # ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5,6,7]
+; AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX2-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
+; AVX2-NEXT:    # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5],ymm4[6,7]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm1 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
+; AVX2-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm11[0],ymm1[1],ymm11[1],ymm1[4],ymm11[4],ymm1[5],ymm11[5]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm1 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[4],ymm12[4],ymm10[5],ymm12[5]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm6 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[4],ymm8[4],ymm7[5],ymm8[5]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3],ymm6[4,5,6,7]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5],ymm1[6,7]
+; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    vmovdqa %ymm1, 96(%rax)
+; AVX2-NEXT:    vmovdqa %ymm0, 288(%rax)
+; AVX2-NEXT:    vmovdqa %ymm4, 64(%rax)
+; AVX2-NEXT:    vmovdqa %ymm2, 128(%rax)
+; AVX2-NEXT:    vmovdqa %ymm15, 160(%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 192(%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 256(%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 320(%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 352(%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, (%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 32(%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX2-NEXT:    addq $232, %rsp
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: store_i32_stride6_vf16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm2
-; AVX512-NEXT:    vmovdqu64 (%rsi), %zmm3
-; AVX512-NEXT:    vmovdqu64 (%rdx), %zmm4
-; AVX512-NEXT:    vmovdqu64 (%rcx), %zmm5
-; AVX512-NEXT:    vmovdqu64 (%r8), %zmm1
-; AVX512-NEXT:    vmovdqu64 (%r9), %zmm0
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
-; AVX512-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm6
-; AVX512-NEXT:    vmovdqa (%rdx), %ymm7
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm8 = [3,11,0,8,7,15,4,12]
-; AVX512-NEXT:    vpermi2d (%rcx), %ymm7, %ymm8
-; AVX512-NEXT:    movb $36, %cl
-; AVX512-NEXT:    kmovd %ecx, %k1
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k1} = zmm8[0,1,0,1,2,3,0,1]
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm6, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
-; AVX512-NEXT:    vpermi2d %zmm0, %zmm7, %zmm6
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,0,16>
-; AVX512-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm1
+; AVX512-NEXT:    vmovdqu64 (%rsi), %zmm2
+; AVX512-NEXT:    vmovdqu64 (%rdx), %zmm3
+; AVX512-NEXT:    vmovdqu64 (%rcx), %zmm4
+; AVX512-NEXT:    vmovdqu64 (%r8), %zmm6
+; AVX512-NEXT:    vmovdqu64 (%r9), %zmm7
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
+; AVX512-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2d %zmm4, %zmm3, %zmm5
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
+; AVX512-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm0
+; AVX512-NEXT:    movb $-110, %al
+; AVX512-NEXT:    kmovd %eax, %k1
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
+; AVX512-NEXT:    vpunpckldq {{.*#+}} zmm5 = zmm6[0],zmm7[0],zmm6[1],zmm7[1],zmm6[4],zmm7[4],zmm6[5],zmm7[5],zmm6[8],zmm7[8],zmm6[9],zmm7[9],zmm6[12],zmm7[12],zmm6[13],zmm7[13]
+; AVX512-NEXT:    movb $36, %al
+; AVX512-NEXT:    kmovd %eax, %k2
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm5[0,1,4,5,4,5,0,1]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm8
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [2,18,0,0,4,20,3,19,2,18,0,0,4,20,3,19]
+; AVX512-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2d %zmm7, %zmm6, %zmm5
+; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm5 {%k1}
+; AVX512-NEXT:    vmovdqa (%rdx), %ymm8
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm9 = [3,11,0,8,7,15,4,12]
+; AVX512-NEXT:    vpermi2d (%rcx), %ymm8, %ymm9
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm9[0,1,0,1,2,3,0,1]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [6,22,5,21,0,0,7,23,6,22,5,21,0,0,7,23]
 ; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm8
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm8, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
-; AVX512-NEXT:    vpermi2d %zmm0, %zmm7, %zmm8
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
-; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
+; AVX512-NEXT:    vpermi2d %zmm7, %zmm6, %zmm8
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
 ; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm9
-; AVX512-NEXT:    movb $-110, %cl
-; AVX512-NEXT:    kmovd %ecx, %k2
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm9 {%k2}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,24,u,6,7,8,9,25,u,12,13,14,15>
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
-; AVX512-NEXT:    vpermi2d %zmm0, %zmm7, %zmm9
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
-; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
+; AVX512-NEXT:    vpermi2d %zmm4, %zmm3, %zmm9
+; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm8
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} ymm8 = ymm8[2],mem[2],ymm8[3],mem[3],ymm8[6],mem[6],ymm8[7],mem[7]
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm8[0,1,2,3,2,3,0,1]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [14,30,13,29,0,0,15,31,14,30,13,29,0,0,15,31]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2d %zmm7, %zmm6, %zmm8
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
 ; AVX512-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm10
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm10 {%k2}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm10, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
-; AVX512-NEXT:    vpermi2d %zmm0, %zmm7, %zmm10
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
-; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
-; AVX512-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512-NEXT:    vpunpckhdq {{.*#+}} ymm11 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k1} = zmm11[0,1,2,3,2,3,0,1]
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm7, %zmm11
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
-; AVX512-NEXT:    vpermi2d %zmm0, %zmm11, %zmm7
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
+; AVX512-NEXT:    vpermi2d %zmm4, %zmm3, %zmm10
+; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} zmm8 = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm8[0,1,6,7,6,7,0,1]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
+; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm8
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [10,26,0,0,12,28,11,27,10,26,0,0,12,28,11,27]
 ; AVX512-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2d %zmm5, %zmm4, %zmm11
-; AVX512-NEXT:    vpunpckhdq {{.*#+}} zmm2 = zmm2[2],zmm3[2],zmm2[3],zmm3[3],zmm2[6],zmm3[6],zmm2[7],zmm3[7],zmm2[10],zmm3[10],zmm2[11],zmm3[11],zmm2[14],zmm3[14],zmm2[15],zmm3[15]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k1} = zmm2[0,1,6,7,6,7,0,1]
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm11, %zmm2
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
-; AVX512-NEXT:    vpermi2d %zmm0, %zmm2, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm10, (%rax)
-; AVX512-NEXT:    vmovdqu64 %zmm9, 192(%rax)
-; AVX512-NEXT:    vmovdqu64 %zmm1, 320(%rax)
-; AVX512-NEXT:    vmovdqu64 %zmm8, 256(%rax)
-; AVX512-NEXT:    vmovdqu64 %zmm7, 128(%rax)
-; AVX512-NEXT:    vmovdqu64 %zmm6, 64(%rax)
+; AVX512-NEXT:    vpermi2d %zmm7, %zmm6, %zmm11
+; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm11 {%k1}
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16]
+; AVX512-NEXT:    vpermi2d %zmm4, %zmm3, %zmm6
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm11 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
+; AVX512-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2d %zmm4, %zmm3, %zmm6
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
+; AVX512-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm3
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm3 {%k1}
+; AVX512-NEXT:    vmovdqa (%r8), %xmm1
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[0,1,0,1,0,1,0,1]
+; AVX512-NEXT:    vmovdqu64 %zmm3, (%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm11, 256(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm10, 320(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm9, 128(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm5, 64(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm0, 192(%r10)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <16 x i32>, <16 x i32>* %in.vecptr0, align 32

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
index 53a0496d5e465..6de0d939af687 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
@@ -106,46 +106,46 @@ define void @store_i64_stride3_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr
 ; AVX1-LABEL: store_i64_stride3_vf4:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovapd (%rdi), %ymm0
-; AVX1-NEXT:    vmovapd (%rsi), %ymm1
-; AVX1-NEXT:    vmovapd (%rdx), %ymm2
-; AVX1-NEXT:    vmovapd 16(%rdi), %xmm3
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = ymm1[0,0,3,2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm4[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3]
-; AVX1-NEXT:    vinsertf128 $1, (%rdx), %ymm0, %ymm4
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = mem[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, (%rdi), %ymm5, %ymm5
-; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5],ymm5[6,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm1 = ymm1[1,0,2,2]
+; AVX1-NEXT:    vmovapd (%rdx), %ymm1
+; AVX1-NEXT:    vinsertf128 $1, (%rdx), %ymm0, %ymm2
+; AVX1-NEXT:    vmovaps (%rdi), %xmm3
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm3[0],mem[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX1-NEXT:    vmovapd 16(%rdx), %xmm3
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3]
+; AVX1-NEXT:    vbroadcastsd 24(%rsi), %ymm4
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3]
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3]
-; AVX1-NEXT:    vmovaps %ymm4, (%rcx)
-; AVX1-NEXT:    vmovapd %ymm3, 64(%rcx)
 ; AVX1-NEXT:    vmovapd %ymm0, 32(%rcx)
+; AVX1-NEXT:    vmovapd %ymm3, 64(%rcx)
+; AVX1-NEXT:    vmovaps %ymm2, (%rcx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: store_i64_stride3_vf4:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovaps (%rdi), %ymm0
-; AVX2-NEXT:    vmovaps (%rsi), %ymm1
-; AVX2-NEXT:    vmovaps (%rdx), %ymm2
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm3 = ymm1[2,3,0,1,6,7,4,5]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5],ymm3[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,3,3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5],ymm2[6,7]
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5,6,7]
-; AVX2-NEXT:    vbroadcastsd (%rdx), %ymm2
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
-; AVX2-NEXT:    vmovaps %ymm0, (%rcx)
-; AVX2-NEXT:    vmovaps %ymm1, 64(%rcx)
-; AVX2-NEXT:    vmovaps %ymm3, 32(%rcx)
+; AVX2-NEXT:    vmovaps (%rdx), %ymm1
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT:    vmovaps 16(%rdx), %xmm3
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-NEXT:    vbroadcastsd 24(%rsi), %ymm3
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-NEXT:    vinsertf128 $1, (%rdx), %ymm0, %ymm3
+; AVX2-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm0[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm4 = mem[2,3,0,1,6,7,4,5]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-NEXT:    vmovaps %ymm0, 32(%rcx)
+; AVX2-NEXT:    vmovaps %ymm3, (%rcx)
+; AVX2-NEXT:    vmovaps %ymm2, 64(%rcx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -154,8 +154,8 @@ define void @store_i64_stride3_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = [10,3,7,11]
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,11,15,3]
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm2
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,4,8,1,5,9,2,6]
 ; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
 ; AVX512-NEXT:    vmovdqu64 %zmm3, (%rcx)
@@ -225,40 +225,40 @@ define void @store_i64_stride3_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovapd 32(%rdi), %ymm0
 ; AVX1-NEXT:    vmovapd (%rdi), %ymm1
-; AVX1-NEXT:    vmovapd (%rsi), %ymm2
-; AVX1-NEXT:    vmovapd 32(%rsi), %ymm3
-; AVX1-NEXT:    vmovapd (%rdx), %ymm4
-; AVX1-NEXT:    vmovapd 32(%rdx), %ymm5
-; AVX1-NEXT:    vinsertf128 $1, (%rdx), %ymm1, %ymm6
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm7 = mem[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, (%rdi), %ymm7, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5],ymm7[6,7]
-; AVX1-NEXT:    vmovapd 48(%rdi), %xmm7
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm5[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm8 = ymm3[0,0,3,2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm5[2,3],ymm8[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2],ymm7[3]
-; AVX1-NEXT:    vinsertf128 $1, 32(%rdx), %ymm0, %ymm8
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm9 = mem[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, 32(%rdi), %ymm9, %ymm9
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5],ymm9[6,7]
-; AVX1-NEXT:    vmovapd 16(%rdi), %xmm9
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm4[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm10 = ymm2[0,0,3,2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm4[2,3],ymm10[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2],ymm9[3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm3 = ymm3[1,0,2,2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm2 = ymm2[1,0,2,2]
+; AVX1-NEXT:    vmovapd 32(%rdx), %ymm2
+; AVX1-NEXT:    vmovapd (%rdx), %ymm3
+; AVX1-NEXT:    vinsertf128 $1, 32(%rdx), %ymm0, %ymm4
+; AVX1-NEXT:    vmovaps (%rdi), %xmm5
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm7 = xmm6[0],mem[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5],ymm6[6,7]
+; AVX1-NEXT:    vinsertf128 $1, (%rdx), %ymm0, %ymm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm7 = xmm5[0],mem[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
+; AVX1-NEXT:    vmovapd 16(%rdx), %xmm6
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm1[2,3],ymm3[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3]
+; AVX1-NEXT:    vbroadcastsd 24(%rsi), %ymm7
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3]
+; AVX1-NEXT:    vmovapd 48(%rdx), %xmm7
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3]
+; AVX1-NEXT:    vbroadcastsd 56(%rsi), %ymm8
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm8[0],ymm2[1],ymm8[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3]
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3]
 ; AVX1-NEXT:    vmovapd %ymm1, 32(%rcx)
-; AVX1-NEXT:    vmovapd %ymm9, 64(%rcx)
-; AVX1-NEXT:    vmovaps %ymm8, 96(%rcx)
-; AVX1-NEXT:    vmovapd %ymm7, 160(%rcx)
 ; AVX1-NEXT:    vmovapd %ymm0, 128(%rcx)
-; AVX1-NEXT:    vmovaps %ymm6, (%rcx)
+; AVX1-NEXT:    vmovapd %ymm7, 160(%rcx)
+; AVX1-NEXT:    vmovapd %ymm6, 64(%rcx)
+; AVX1-NEXT:    vmovaps %ymm5, (%rcx)
+; AVX1-NEXT:    vmovaps %ymm4, 96(%rcx)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -266,40 +266,40 @@ define void @store_i64_stride3_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX2-NEXT:    vmovaps (%rsi), %ymm2
-; AVX2-NEXT:    vmovaps 32(%rsi), %ymm3
-; AVX2-NEXT:    vmovaps (%rdx), %ymm4
-; AVX2-NEXT:    vmovaps 32(%rdx), %ymm5
+; AVX2-NEXT:    vmovaps 32(%rdx), %ymm2
+; AVX2-NEXT:    vmovaps (%rdx), %ymm3
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm3[2,3]
+; AVX2-NEXT:    vmovaps 16(%rdx), %xmm5
+; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
+; AVX2-NEXT:    vbroadcastsd 24(%rsi), %ymm5
+; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
+; AVX2-NEXT:    vinsertf128 $1, 32(%rdx), %ymm0, %ymm5
 ; AVX2-NEXT:    vmovddup {{.*#+}} xmm6 = mem[0,0]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm7 = ymm0[0,1,2,1]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm7 = ymm1[0,1,2,1]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
-; AVX2-NEXT:    vbroadcastsd (%rdx), %ymm7
+; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm1[2,3],ymm2[2,3]
+; AVX2-NEXT:    vmovaps 48(%rdx), %xmm7
+; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7]
+; AVX2-NEXT:    vbroadcastsd 56(%rsi), %ymm7
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm7 = ymm3[2,3,0,1,6,7,4,5]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm1[4,5],ymm7[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm5[2,3],ymm7[4,5,6,7]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm1[1],ymm3[1],ymm1[3],ymm3[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,2,3,3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5],ymm5[6,7]
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm5[2,3],ymm1[4,5,6,7]
-; AVX2-NEXT:    vbroadcastsd 32(%rdx), %ymm5
-; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5],ymm1[6,7]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,2,3,3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm8 = ymm4[2,1,2,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm8[0,1],ymm5[2,3,4,5],ymm8[6,7]
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
+; AVX2-NEXT:    vinsertf128 $1, (%rdx), %ymm0, %ymm7
+; AVX2-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm9 = ymm0[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7]
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm8 = mem[2,3,0,1,6,7,4,5]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3],ymm8[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm2 = mem[2,3,0,1,6,7,4,5]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
 ; AVX2-NEXT:    vmovaps %ymm0, 32(%rcx)
-; AVX2-NEXT:    vmovaps %ymm5, 64(%rcx)
-; AVX2-NEXT:    vmovaps %ymm1, 96(%rcx)
-; AVX2-NEXT:    vmovaps %ymm3, 160(%rcx)
-; AVX2-NEXT:    vmovaps %ymm7, 128(%rcx)
-; AVX2-NEXT:    vmovaps %ymm6, (%rcx)
+; AVX2-NEXT:    vmovaps %ymm1, 128(%rcx)
+; AVX2-NEXT:    vmovaps %ymm7, (%rcx)
+; AVX2-NEXT:    vmovaps %ymm6, 160(%rcx)
+; AVX2-NEXT:    vmovaps %ymm5, 96(%rcx)
+; AVX2-NEXT:    vmovaps %ymm4, 64(%rcx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -312,15 +312,15 @@ define void @store_i64_stride3_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
 ; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,8,3,4,9,6,7]
 ; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm4
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,3,11,u,4,12,u,5>
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [10,1,2,11,4,5,12,7]
-; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <5,u,14,6,u,15,7,u>
-; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,13,2,3,14,5,6,15]
-; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm0
-; AVX512-NEXT:    vmovdqu64 %zmm0, 128(%rcx)
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <2,11,u,3,12,u,4,13>
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,11,3,4,12,6,7]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm3, %zmm5
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <5,13,u,6,14,u,7,15>
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm1, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,14,3,4,15,6,7]
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm3, %zmm1
+; AVX512-NEXT:    vmovdqu64 %zmm1, 128(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm5, 64(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm4, (%rcx)
 ; AVX512-NEXT:    vzeroupper
@@ -446,168 +446,159 @@ define void @store_i64_stride3_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vec
 ;
 ; AVX1-LABEL: store_i64_stride3_vf16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $40, %rsp
-; AVX1-NEXT:    vmovapd 32(%rdi), %ymm2
-; AVX1-NEXT:    vmovapd 64(%rdi), %ymm4
-; AVX1-NEXT:    vmovaps (%rdi), %ymm0
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovapd 32(%rsi), %ymm6
-; AVX1-NEXT:    vmovapd 64(%rsi), %ymm9
-; AVX1-NEXT:    vmovapd 96(%rsi), %ymm5
+; AVX1-NEXT:    vmovapd (%rdi), %ymm13
+; AVX1-NEXT:    vmovapd 96(%rdi), %ymm14
+; AVX1-NEXT:    vmovapd 32(%rdi), %ymm4
+; AVX1-NEXT:    vmovapd 64(%rdi), %ymm7
+; AVX1-NEXT:    vmovapd (%rdx), %ymm3
+; AVX1-NEXT:    vmovapd 96(%rdx), %ymm5
 ; AVX1-NEXT:    vmovapd 32(%rdx), %ymm8
-; AVX1-NEXT:    vmovapd 64(%rdx), %ymm11
-; AVX1-NEXT:    vmovapd 96(%rdx), %ymm7
+; AVX1-NEXT:    vmovapd 64(%rdx), %ymm10
 ; AVX1-NEXT:    vinsertf128 $1, (%rdx), %ymm0, %ymm1
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = mem[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, (%rdi), %ymm3, %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2,3],ymm1[4,5],ymm3[6,7]
-; AVX1-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX1-NEXT:    vmovapd 80(%rdi), %xmm3
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm11[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm10 = ymm9[0,0,3,2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm11[2,3],ymm10[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm10[0],ymm3[1],ymm10[2],ymm3[3]
-; AVX1-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vinsertf128 $1, 64(%rdx), %ymm4, %ymm10
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm12 = mem[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm12, %ymm12
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm12[2,3],ymm10[4,5],ymm12[6,7]
-; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovapd 48(%rdi), %xmm12
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm12 = ymm12[0,1],ymm8[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm13 = ymm6[0,0,3,2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm8[2,3],ymm13[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm13[0],ymm12[1],ymm13[2],ymm12[3]
-; AVX1-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vinsertf128 $1, 32(%rdx), %ymm2, %ymm13
-; AVX1-NEXT:    vmovapd %ymm2, %ymm12
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm14 = mem[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, 32(%rdi), %ymm14, %ymm14
-; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
-; AVX1-NEXT:    vmovapd 112(%rdi), %xmm14
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm14 = ymm14[0,1],ymm7[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm15 = ymm5[0,0,3,2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm7[2,3],ymm15[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm15 = mem[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm15, %ymm15
-; AVX1-NEXT:    vmovapd 96(%rdi), %ymm2
-; AVX1-NEXT:    vinsertf128 $1, 96(%rdx), %ymm2, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2,3],ymm1[4,5],ymm15[6,7]
-; AVX1-NEXT:    vmovapd (%rdx), %ymm15
-; AVX1-NEXT:    vmovapd 16(%rdi), %xmm3
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm15[2,3]
-; AVX1-NEXT:    vmovapd (%rsi), %ymm0
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm10 = ymm0[0,0,3,2]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm15[2,3],ymm10[2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm10[0],ymm3[1],ymm10[2],ymm3[3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm9 = ymm9[1,0,2,2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm9[0,1],ymm4[2],ymm9[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0],ymm11[1],ymm4[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm6 = ymm6[1,0,2,2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm12[2],ymm6[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0],ymm8[1],ymm6[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm5 = ymm5[1,0,2,2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0,1],ymm2[2],ymm5[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0],ymm7[1],ymm2[2,3]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,2]
-; AVX1-NEXT:    vblendpd $4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX1-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3]
-; AVX1-NEXT:    vmovapd %ymm0, 32(%rcx)
-; AVX1-NEXT:    vmovapd %ymm3, 64(%rcx)
-; AVX1-NEXT:    vmovaps %ymm1, 288(%rcx)
-; AVX1-NEXT:    vmovapd %ymm14, 352(%rcx)
-; AVX1-NEXT:    vmovapd %ymm2, 320(%rcx)
-; AVX1-NEXT:    vmovaps %ymm13, 96(%rcx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm0, 160(%rcx)
-; AVX1-NEXT:    vmovapd %ymm6, 128(%rcx)
-; AVX1-NEXT:    vmovapd %ymm4, 224(%rcx)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm0, 192(%rcx)
+; AVX1-NEXT:    vmovaps (%rdi), %xmm6
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm0
+; AVX1-NEXT:    vmovaps 64(%rdi), %xmm2
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm9 = xmm6[0],mem[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm9, %ymm6
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5],ymm6[6,7]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vinsertf128 $1, 64(%rdx), %ymm0, %ymm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm9 = xmm2[0],mem[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm9, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm6[4,5],ymm2[6,7]
+; AVX1-NEXT:    vinsertf128 $1, 32(%rdx), %ymm0, %ymm2
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm9 = xmm0[0],mem[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm9, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdx), %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps 96(%rdi), %xmm2
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm11 = xmm2[0],mem[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm11, %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX1-NEXT:    vmovapd 80(%rdx), %xmm0
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm7[2,3],ymm10[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
+; AVX1-NEXT:    vbroadcastsd 88(%rsi), %ymm2
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm12 = ymm0[0,1],ymm2[2],ymm0[3]
+; AVX1-NEXT:    vmovapd 48(%rdx), %xmm0
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm4[2,3],ymm8[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
+; AVX1-NEXT:    vbroadcastsd 56(%rsi), %ymm2
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3]
+; AVX1-NEXT:    vmovapd 112(%rdx), %xmm2
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm14[2,3],ymm5[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0],ymm15[1,2,3]
+; AVX1-NEXT:    vbroadcastsd 120(%rsi), %ymm15
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm15[2],ymm2[3]
+; AVX1-NEXT:    vmovapd 16(%rdx), %xmm15
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm13[2,3],ymm3[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm15[0],ymm1[1,2,3]
+; AVX1-NEXT:    vbroadcastsd 24(%rsi), %ymm15
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm15 = mem[1,0,2,2]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm10 = ymm15[0],ymm10[1],ymm15[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm7 = ymm10[0,1],ymm7[2],ymm10[3]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm10 = mem[1,0,2,2]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm8 = ymm10[0],ymm8[1],ymm10[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm8[0],ymm5[1],ymm8[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm14[2],ymm5[3]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm8[0],ymm3[1],ymm8[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm13[2],ymm3[3]
+; AVX1-NEXT:    vmovapd %ymm3, 32(%rcx)
+; AVX1-NEXT:    vmovapd %ymm5, 320(%rcx)
+; AVX1-NEXT:    vmovapd %ymm4, 128(%rcx)
+; AVX1-NEXT:    vmovapd %ymm7, 224(%rcx)
+; AVX1-NEXT:    vmovapd %ymm1, 64(%rcx)
+; AVX1-NEXT:    vmovapd %ymm2, 352(%rcx)
+; AVX1-NEXT:    vmovapd %ymm0, 160(%rcx)
+; AVX1-NEXT:    vmovapd %ymm12, 256(%rcx)
+; AVX1-NEXT:    vmovaps %ymm11, 288(%rcx)
+; AVX1-NEXT:    vmovaps %ymm9, 96(%rcx)
+; AVX1-NEXT:    vmovaps %ymm6, 192(%rcx)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm0, 256(%rcx)
-; AVX1-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm0, (%rcx)
-; AVX1-NEXT:    addq $40, %rsp
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: store_i64_stride3_vf16:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovaps (%rdi), %ymm0
-; AVX2-NEXT:    vmovaps 32(%rdi), %ymm9
+; AVX2-NEXT:    vmovaps 32(%rdi), %ymm4
 ; AVX2-NEXT:    vmovaps 64(%rdi), %ymm7
-; AVX2-NEXT:    vmovaps 96(%rdi), %ymm5
-; AVX2-NEXT:    vmovaps (%rsi), %ymm2
-; AVX2-NEXT:    vmovaps 32(%rsi), %ymm12
-; AVX2-NEXT:    vmovaps 64(%rsi), %ymm11
-; AVX2-NEXT:    vmovaps 96(%rsi), %ymm8
-; AVX2-NEXT:    vmovaps (%rdx), %ymm3
-; AVX2-NEXT:    vmovaps 32(%rdx), %ymm13
-; AVX2-NEXT:    vmovaps 64(%rdx), %ymm14
-; AVX2-NEXT:    vmovaps 96(%rdx), %ymm10
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm0[0,1,2,1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7]
-; AVX2-NEXT:    vbroadcastsd (%rdx), %ymm4
-; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5],ymm1[6,7]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm7[1],ymm11[1],ymm7[3],ymm11[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,3,3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm6 = ymm14[2,1,2,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3,4,5],ymm6[6,7]
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm6 = mem[0,0]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm15 = ymm7[0,1,2,1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm15[0,1],ymm6[2,3],ymm15[4,5,6,7]
-; AVX2-NEXT:    vbroadcastsd 64(%rdx), %ymm15
-; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm15[4,5],ymm6[6,7]
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm11 = ymm11[2,3,0,1,6,7,4,5]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5],ymm11[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm14[2,3],ymm7[4,5,6,7]
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm11 = ymm12[2,3,0,1,6,7,4,5]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm9[4,5],ymm11[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm13[2,3],ymm11[4,5,6,7]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm12 = ymm9[1],ymm12[1],ymm9[3],ymm12[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm12 = ymm12[0,2,3,3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm13 = ymm13[2,1,2,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3,4,5],ymm13[6,7]
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm13 = mem[0,0]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,1,2,1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm13[2,3],ymm9[4,5,6,7]
-; AVX2-NEXT:    vbroadcastsd 32(%rdx), %ymm13
-; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm13[4,5],ymm9[6,7]
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm13 = ymm8[2,3,0,1,6,7,4,5]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm5[4,5],ymm13[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm10[2,3],ymm13[4,5,6,7]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm5[1],ymm8[1],ymm5[3],ymm8[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[2,1,2,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3,4,5],ymm10[6,7]
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm10 = mem[0,0]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
-; AVX2-NEXT:    vbroadcastsd 96(%rdx), %ymm10
-; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm10[4,5],ymm5[6,7]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm10 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,2,3,3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm14 = ymm3[2,1,2,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm10 = ymm14[0,1],ymm10[2,3,4,5],ymm14[6,7]
-; AVX2-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
+; AVX2-NEXT:    vmovaps 96(%rdi), %ymm3
+; AVX2-NEXT:    vmovaps (%rdx), %ymm2
+; AVX2-NEXT:    vmovaps 96(%rdx), %ymm6
+; AVX2-NEXT:    vmovaps 32(%rdx), %ymm8
+; AVX2-NEXT:    vmovaps 64(%rdx), %ymm10
+; AVX2-NEXT:    vinsertf128 $1, (%rdx), %ymm0, %ymm1
+; AVX2-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm9 = ymm0[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm9[0,1],ymm5[2,3],ymm9[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5],ymm5[6,7]
+; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vinsertf128 $1, 64(%rdx), %ymm0, %ymm5
+; AVX2-NEXT:    vmovddup {{.*#+}} xmm9 = mem[0,0]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm11 = ymm7[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3],ymm11[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5],ymm9[6,7]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm7[2,3],ymm10[2,3]
+; AVX2-NEXT:    vmovaps 80(%rdx), %xmm11
+; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3],ymm11[4,5],ymm9[6,7]
+; AVX2-NEXT:    vbroadcastsd 88(%rsi), %ymm11
+; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5],ymm9[6,7]
+; AVX2-NEXT:    vinsertf128 $1, 32(%rdx), %ymm0, %ymm11
+; AVX2-NEXT:    vmovddup {{.*#+}} xmm12 = mem[0,0]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm13 = ymm4[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5],ymm12[6,7]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm4[2,3],ymm8[2,3]
+; AVX2-NEXT:    vmovaps 48(%rdx), %xmm13
+; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5],ymm12[6,7]
+; AVX2-NEXT:    vbroadcastsd 56(%rsi), %ymm13
+; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7]
+; AVX2-NEXT:    vinsertf128 $1, 96(%rdx), %ymm0, %ymm13
+; AVX2-NEXT:    vmovddup {{.*#+}} xmm14 = mem[0,0]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm15 = ymm3[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5],ymm14[6,7]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm3[2,3],ymm6[2,3]
+; AVX2-NEXT:    vmovaps 112(%rdx), %xmm15
+; AVX2-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5],ymm14[6,7]
+; AVX2-NEXT:    vbroadcastsd 120(%rsi), %ymm15
+; AVX2-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT:    vmovaps 16(%rdx), %xmm1
+; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2,3],ymm1[4,5],ymm15[6,7]
+; AVX2-NEXT:    vbroadcastsd 24(%rsi), %ymm15
+; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5],ymm1[6,7]
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm15 = mem[2,3,0,1,6,7,4,5]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3],ymm15[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5],ymm10[6,7]
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm10 = mem[2,3,0,1,6,7,4,5]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3],ymm10[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm4[4,5],ymm8[6,7]
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm8 = mem[2,3,0,1,6,7,4,5]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5],ymm6[6,7]
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm6 = mem[2,3,0,1,6,7,4,5]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5,6,7]
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
 ; AVX2-NEXT:    vmovaps %ymm0, 32(%rcx)
-; AVX2-NEXT:    vmovaps %ymm10, 64(%rcx)
-; AVX2-NEXT:    vmovaps %ymm5, 288(%rcx)
-; AVX2-NEXT:    vmovaps %ymm8, 352(%rcx)
-; AVX2-NEXT:    vmovaps %ymm13, 320(%rcx)
-; AVX2-NEXT:    vmovaps %ymm9, 96(%rcx)
-; AVX2-NEXT:    vmovaps %ymm12, 160(%rcx)
-; AVX2-NEXT:    vmovaps %ymm11, 128(%rcx)
+; AVX2-NEXT:    vmovaps %ymm3, 320(%rcx)
+; AVX2-NEXT:    vmovaps %ymm4, 128(%rcx)
 ; AVX2-NEXT:    vmovaps %ymm7, 224(%rcx)
-; AVX2-NEXT:    vmovaps %ymm6, 192(%rcx)
-; AVX2-NEXT:    vmovaps %ymm4, 256(%rcx)
-; AVX2-NEXT:    vmovaps %ymm1, (%rcx)
+; AVX2-NEXT:    vmovaps %ymm1, 64(%rcx)
+; AVX2-NEXT:    vmovaps %ymm14, 352(%rcx)
+; AVX2-NEXT:    vmovaps %ymm13, 288(%rcx)
+; AVX2-NEXT:    vmovaps %ymm12, 160(%rcx)
+; AVX2-NEXT:    vmovaps %ymm11, 96(%rcx)
+; AVX2-NEXT:    vmovaps %ymm9, 256(%rcx)
+; AVX2-NEXT:    vmovaps %ymm5, 192(%rcx)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, (%rcx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -624,23 +615,23 @@ define void @store_i64_stride3_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vec
 ; AVX512-NEXT:    vpermt2q %zmm2, %zmm6, %zmm7
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,8,3,4,9,6,7]
 ; AVX512-NEXT:    vpermt2q %zmm4, %zmm8, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <5,u,14,6,u,15,7,u>
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <5,13,u,6,14,u,7,15>
 ; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm10
-; AVX512-NEXT:    vpermt2q %zmm1, %zmm9, %zmm10
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,13,2,3,14,5,6,15]
-; AVX512-NEXT:    vpermt2q %zmm5, %zmm11, %zmm10
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <u,3,11,u,4,12,u,5>
-; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm13
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm12, %zmm13
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [10,1,2,11,4,5,12,7]
-; AVX512-NEXT:    vpermt2q %zmm5, %zmm14, %zmm13
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm9, %zmm10
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,1,14,3,4,15,6,7]
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm11, %zmm10
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <2,11,u,3,12,u,4,13>
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm13
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm12, %zmm13
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,1,11,3,4,12,6,7]
+; AVX512-NEXT:    vpermt2q %zmm3, %zmm14, %zmm13
 ; AVX512-NEXT:    vpermt2q %zmm3, %zmm6, %zmm1
 ; AVX512-NEXT:    vpermt2q %zmm5, %zmm8, %zmm1
-; AVX512-NEXT:    vpermi2q %zmm0, %zmm2, %zmm9
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm11, %zmm9
-; AVX512-NEXT:    vpermt2q %zmm2, %zmm12, %zmm0
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm14, %zmm0
-; AVX512-NEXT:    vmovdqu64 %zmm0, 64(%rcx)
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm2, %zmm9
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm11, %zmm9
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm12, %zmm4
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm14, %zmm4
+; AVX512-NEXT:    vmovdqu64 %zmm4, 64(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm9, 128(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm1, 192(%rcx)
 ; AVX512-NEXT:    vmovdqu64 %zmm13, 256(%rcx)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
index 56d07489ce7d2..77c17fb20aa7e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
@@ -258,107 +258,107 @@ define void @store_i64_stride4_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
 ;
 ; AVX1-LABEL: store_i64_stride4_vf8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovaps (%rdx), %ymm2
-; AVX1-NEXT:    vmovaps 32(%rdx), %ymm1
-; AVX1-NEXT:    vmovaps (%rcx), %ymm3
+; AVX1-NEXT:    vmovaps 32(%rdx), %ymm2
+; AVX1-NEXT:    vmovaps (%rdx), %ymm3
 ; AVX1-NEXT:    vmovaps 32(%rcx), %ymm4
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
-; AVX1-NEXT:    vmovaps 48(%rsi), %xmm5
-; AVX1-NEXT:    vmovaps 48(%rdi), %xmm6
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm6[0],xmm5[0]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm6[1],xmm5[1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm4[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX1-NEXT:    vmovaps 16(%rsi), %xmm5
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm6
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm6[0],xmm5[0]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm7[0,1,2,3],ymm4[4,5,6,7]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm6[1],xmm5[1]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-NEXT:    vmovaps 32(%rsi), %xmm3
-; AVX1-NEXT:    vmovaps 32(%rdi), %xmm5
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm5[0],xmm3[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm5[1],xmm3[1]
-; AVX1-NEXT:    vmovaps (%rsi), %xmm5
-; AVX1-NEXT:    vmovaps (%rdi), %xmm7
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm7[0],xmm5[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm7[1],xmm5[1]
+; AVX1-NEXT:    vmovaps (%rcx), %ymm5
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
+; AVX1-NEXT:    vmovaps 16(%rsi), %xmm6
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm7
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm7[0],xmm6[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm9 = ymm2[1],ymm4[1],ymm2[3],ymm4[3]
+; AVX1-NEXT:    vmovaps 48(%rsi), %xmm0
+; AVX1-NEXT:    vmovaps 48(%rdi), %xmm1
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm1[1],xmm0[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm7[1],xmm6[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vmovaps 32(%rsi), %xmm2
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm3
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm3[1],xmm2[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX1-NEXT:    vmovaps (%rsi), %xmm3
+; AVX1-NEXT:    vmovaps (%rdi), %xmm5
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm5[1],xmm3[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm5[0],xmm3[0]
+; AVX1-NEXT:    vmovaps (%rcx), %xmm5
 ; AVX1-NEXT:    vmovaps 32(%rcx), %xmm7
+; AVX1-NEXT:    vmovaps (%rdx), %xmm0
 ; AVX1-NEXT:    vmovaps 32(%rdx), %xmm1
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm1[0],xmm7[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm7[1]
-; AVX1-NEXT:    vmovaps (%rcx), %xmm7
-; AVX1-NEXT:    vmovaps (%rdx), %xmm2
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm2[0],xmm7[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm7[1]
-; AVX1-NEXT:    vmovaps %xmm2, 48(%r8)
-; AVX1-NEXT:    vmovaps %xmm6, 16(%r8)
-; AVX1-NEXT:    vmovaps %xmm1, 176(%r8)
-; AVX1-NEXT:    vmovaps %xmm4, 144(%r8)
-; AVX1-NEXT:    vmovaps %xmm5, 32(%r8)
-; AVX1-NEXT:    vmovaps %xmm0, (%r8)
-; AVX1-NEXT:    vmovaps %xmm3, 160(%r8)
-; AVX1-NEXT:    vmovaps %xmm12, 128(%r8)
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm1[1],xmm7[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm7[0]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm0[1],xmm5[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; AVX1-NEXT:    vmovaps %xmm0, 16(%r8)
+; AVX1-NEXT:    vmovaps %xmm7, 48(%r8)
+; AVX1-NEXT:    vmovaps %xmm1, 144(%r8)
+; AVX1-NEXT:    vmovaps %xmm4, 176(%r8)
+; AVX1-NEXT:    vmovaps %xmm3, (%r8)
+; AVX1-NEXT:    vmovaps %xmm6, 32(%r8)
+; AVX1-NEXT:    vmovaps %xmm2, 128(%r8)
+; AVX1-NEXT:    vmovaps %xmm12, 160(%r8)
 ; AVX1-NEXT:    vmovaps %ymm11, 96(%r8)
-; AVX1-NEXT:    vmovaps %ymm10, 64(%r8)
+; AVX1-NEXT:    vmovaps %ymm10, 192(%r8)
 ; AVX1-NEXT:    vmovaps %ymm9, 224(%r8)
-; AVX1-NEXT:    vmovaps %ymm8, 192(%r8)
+; AVX1-NEXT:    vmovaps %ymm8, 64(%r8)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: store_i64_stride4_vf8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovaps (%rdi), %ymm2
-; AVX2-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX2-NEXT:    vmovaps (%rsi), %ymm3
+; AVX2-NEXT:    vmovaps 32(%rdi), %ymm2
+; AVX2-NEXT:    vmovaps (%rdi), %ymm3
 ; AVX2-NEXT:    vmovaps 32(%rsi), %ymm4
-; AVX2-NEXT:    vmovaps (%rdx), %ymm5
+; AVX2-NEXT:    vmovaps (%rsi), %ymm5
 ; AVX2-NEXT:    vmovaps 32(%rdx), %ymm6
-; AVX2-NEXT:    vmovaps (%rcx), %ymm7
+; AVX2-NEXT:    vmovaps (%rdx), %ymm7
 ; AVX2-NEXT:    vmovaps 32(%rcx), %ymm8
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm9 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3],ymm0[2,3]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm6[1],ymm8[1],ymm6[3],ymm8[3]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm1[2,3],ymm6[2,3]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm7[0],ymm5[2],ymm7[2]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm6[2,3],ymm4[2,3]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm7[1],ymm5[3],ymm7[3]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm2[2,3],ymm5[2,3]
-; AVX2-NEXT:    vmovaps (%rsi), %xmm3
+; AVX2-NEXT:    vmovaps (%rcx), %ymm9
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm1[2,3],ymm0[2,3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm6[1],ymm8[1],ymm6[3],ymm8[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm10 = ymm2[1],ymm4[1],ymm2[3],ymm4[3]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm10[2,3],ymm1[2,3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm2[2,3],ymm6[2,3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm7[1],ymm9[1],ymm7[3],ymm9[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm3[2,3],ymm4[2,3]
+; AVX2-NEXT:    vmovaps (%rsi), %xmm4
 ; AVX2-NEXT:    vmovaps 32(%rsi), %xmm5
 ; AVX2-NEXT:    vmovaps (%rdi), %xmm6
 ; AVX2-NEXT:    vmovaps 32(%rdi), %xmm7
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm5[0]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm5[1]
 ; AVX2-NEXT:    vmovaps (%rcx), %xmm1
-; AVX2-NEXT:    vmovaps 32(%rcx), %xmm4
-; AVX2-NEXT:    vmovaps (%rdx), %xmm2
+; AVX2-NEXT:    vmovaps 32(%rcx), %xmm2
+; AVX2-NEXT:    vmovaps (%rdx), %xmm3
 ; AVX2-NEXT:    vmovaps 32(%rdx), %xmm0
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm13 = xmm0[0],xmm4[0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm7[1],xmm5[1]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1]
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm3[0]
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm2[0],xmm1[0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm6[1],xmm3[1]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
-; AVX2-NEXT:    vmovaps %xmm1, 48(%r8)
-; AVX2-NEXT:    vmovaps %xmm3, 32(%r8)
-; AVX2-NEXT:    vmovaps %xmm7, 16(%r8)
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm13 = xmm0[1],xmm2[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm5[0]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm6[1],xmm4[1]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm3[1],xmm1[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVX2-NEXT:    vmovaps %xmm1, 16(%r8)
 ; AVX2-NEXT:    vmovaps %xmm4, (%r8)
-; AVX2-NEXT:    vmovaps %xmm0, 176(%r8)
-; AVX2-NEXT:    vmovaps %xmm5, 160(%r8)
-; AVX2-NEXT:    vmovaps %xmm13, 144(%r8)
-; AVX2-NEXT:    vmovaps %xmm12, 128(%r8)
-; AVX2-NEXT:    vmovaps %ymm11, 96(%r8)
-; AVX2-NEXT:    vmovaps %ymm10, 64(%r8)
-; AVX2-NEXT:    vmovaps %ymm8, 224(%r8)
-; AVX2-NEXT:    vmovaps %ymm9, 192(%r8)
+; AVX2-NEXT:    vmovaps %xmm7, 48(%r8)
+; AVX2-NEXT:    vmovaps %xmm2, 32(%r8)
+; AVX2-NEXT:    vmovaps %xmm0, 144(%r8)
+; AVX2-NEXT:    vmovaps %xmm5, 128(%r8)
+; AVX2-NEXT:    vmovaps %xmm13, 176(%r8)
+; AVX2-NEXT:    vmovaps %xmm12, 160(%r8)
+; AVX2-NEXT:    vmovaps %ymm9, 96(%r8)
+; AVX2-NEXT:    vmovaps %ymm8, 192(%r8)
+; AVX2-NEXT:    vmovaps %ymm10, 224(%r8)
+; AVX2-NEXT:    vmovaps %ymm11, 64(%r8)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -617,54 +617,54 @@ define void @store_i64_stride4_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vec
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT:    vmovaps 64(%rsi), %xmm2
 ; AVX1-NEXT:    vmovaps 64(%rdi), %xmm3
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm3[0],xmm2[0]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm3[1],xmm2[1]
 ; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovaps 32(%rcx), %xmm4
 ; AVX1-NEXT:    vmovaps 64(%rcx), %xmm5
 ; AVX1-NEXT:    vmovaps 64(%rdx), %xmm7
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm15 = xmm7[0],xmm5[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm3[1],xmm2[1]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm13 = xmm7[1],xmm5[1]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm7[1],xmm5[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm14 = xmm3[0],xmm2[0]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm13 = xmm7[0],xmm5[0]
 ; AVX1-NEXT:    vmovaps 32(%rsi), %xmm5
 ; AVX1-NEXT:    vmovaps 32(%rdi), %xmm7
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm5[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm7[1],xmm5[1]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm5[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm10 = xmm7[0],xmm5[0]
 ; AVX1-NEXT:    vmovaps 32(%rdx), %xmm7
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm11 = xmm7[0],xmm4[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm7[1],xmm4[1]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm11 = xmm7[1],xmm4[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm7[0],xmm4[0]
 ; AVX1-NEXT:    vmovaps 96(%rsi), %xmm7
 ; AVX1-NEXT:    vmovaps 96(%rdi), %xmm0
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm7[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm0[1],xmm7[1]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm0[1],xmm7[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm0[0],xmm7[0]
 ; AVX1-NEXT:    vmovaps 96(%rcx), %xmm7
 ; AVX1-NEXT:    vmovaps 96(%rdx), %xmm0
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm0[0],xmm7[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm0[1],xmm7[1]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm0[1],xmm7[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm7[0]
 ; AVX1-NEXT:    vmovaps (%rsi), %xmm7
 ; AVX1-NEXT:    vmovaps (%rdi), %xmm0
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm0[0],xmm7[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm7[1]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm0[1],xmm7[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm7[0]
 ; AVX1-NEXT:    vmovaps (%rcx), %xmm7
 ; AVX1-NEXT:    vmovaps (%rdx), %xmm0
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm7[0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
-; AVX1-NEXT:    vmovaps %xmm0, 48(%r8)
-; AVX1-NEXT:    vmovaps %xmm2, 32(%r8)
-; AVX1-NEXT:    vmovaps %xmm1, 16(%r8)
-; AVX1-NEXT:    vmovaps %xmm3, (%r8)
-; AVX1-NEXT:    vmovaps %xmm4, 432(%r8)
-; AVX1-NEXT:    vmovaps %xmm6, 416(%r8)
-; AVX1-NEXT:    vmovaps %xmm5, 400(%r8)
-; AVX1-NEXT:    vmovaps %xmm8, 384(%r8)
-; AVX1-NEXT:    vmovaps %xmm9, 176(%r8)
-; AVX1-NEXT:    vmovaps %xmm10, 160(%r8)
-; AVX1-NEXT:    vmovaps %xmm11, 144(%r8)
-; AVX1-NEXT:    vmovaps %xmm12, 128(%r8)
-; AVX1-NEXT:    vmovaps %xmm13, 304(%r8)
-; AVX1-NEXT:    vmovaps %xmm14, 288(%r8)
-; AVX1-NEXT:    vmovaps %xmm15, 272(%r8)
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm0[1],xmm7[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm7[0]
+; AVX1-NEXT:    vmovaps %xmm0, 16(%r8)
+; AVX1-NEXT:    vmovaps %xmm2, (%r8)
+; AVX1-NEXT:    vmovaps %xmm1, 48(%r8)
+; AVX1-NEXT:    vmovaps %xmm3, 32(%r8)
+; AVX1-NEXT:    vmovaps %xmm4, 400(%r8)
+; AVX1-NEXT:    vmovaps %xmm6, 384(%r8)
+; AVX1-NEXT:    vmovaps %xmm5, 432(%r8)
+; AVX1-NEXT:    vmovaps %xmm8, 416(%r8)
+; AVX1-NEXT:    vmovaps %xmm9, 144(%r8)
+; AVX1-NEXT:    vmovaps %xmm10, 128(%r8)
+; AVX1-NEXT:    vmovaps %xmm11, 176(%r8)
+; AVX1-NEXT:    vmovaps %xmm12, 160(%r8)
+; AVX1-NEXT:    vmovaps %xmm13, 272(%r8)
+; AVX1-NEXT:    vmovaps %xmm14, 256(%r8)
+; AVX1-NEXT:    vmovaps %xmm15, 304(%r8)
 ; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 256(%r8)
+; AVX1-NEXT:    vmovaps %xmm0, 288(%r8)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm0, 448(%r8)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -740,52 +740,52 @@ define void @store_i64_stride4_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vec
 ; AVX2-NEXT:    vmovaps 64(%rsi), %xmm3
 ; AVX2-NEXT:    vmovaps 32(%rdi), %xmm4
 ; AVX2-NEXT:    vmovaps 64(%rdi), %xmm5
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm5[0],xmm3[0]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm5[1],xmm3[1]
 ; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-NEXT:    vmovaps 32(%rcx), %xmm6
 ; AVX2-NEXT:    vmovaps 64(%rcx), %xmm7
 ; AVX2-NEXT:    vmovaps 64(%rdx), %xmm0
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm5[1],xmm3[1]
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm15 = xmm0[0],xmm7[0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm13 = xmm0[1],xmm7[1]
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm4[0],xmm2[0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm4[1],xmm2[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm14 = xmm5[0],xmm3[0]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm0[1],xmm7[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm13 = xmm0[0],xmm7[0]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm4[1],xmm2[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm10 = xmm4[0],xmm2[0]
 ; AVX2-NEXT:    vmovaps 32(%rdx), %xmm4
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm11 = xmm4[0],xmm6[0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm4[1],xmm6[1]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm11 = xmm4[1],xmm6[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm4[0],xmm6[0]
 ; AVX2-NEXT:    vmovaps 96(%rsi), %xmm6
 ; AVX2-NEXT:    vmovaps 96(%rdi), %xmm0
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm6[0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm0[1],xmm6[1]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm0[1],xmm6[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm0[0],xmm6[0]
 ; AVX2-NEXT:    vmovaps 96(%rcx), %xmm6
 ; AVX2-NEXT:    vmovaps 96(%rdx), %xmm0
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm0[0],xmm6[0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm0[1],xmm6[1]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm0[1],xmm6[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm6[0]
 ; AVX2-NEXT:    vmovaps (%rsi), %xmm6
 ; AVX2-NEXT:    vmovaps (%rdi), %xmm0
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm0[0],xmm6[0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm6[1]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm0[1],xmm6[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm6[0]
 ; AVX2-NEXT:    vmovaps (%rcx), %xmm6
 ; AVX2-NEXT:    vmovaps (%rdx), %xmm0
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm6[0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1]
-; AVX2-NEXT:    vmovaps %xmm0, 48(%r8)
-; AVX2-NEXT:    vmovaps %xmm2, 32(%r8)
-; AVX2-NEXT:    vmovaps %xmm1, 16(%r8)
-; AVX2-NEXT:    vmovaps %xmm3, (%r8)
-; AVX2-NEXT:    vmovaps %xmm4, 432(%r8)
-; AVX2-NEXT:    vmovaps %xmm7, 416(%r8)
-; AVX2-NEXT:    vmovaps %xmm5, 400(%r8)
-; AVX2-NEXT:    vmovaps %xmm8, 384(%r8)
-; AVX2-NEXT:    vmovaps %xmm9, 176(%r8)
-; AVX2-NEXT:    vmovaps %xmm10, 160(%r8)
-; AVX2-NEXT:    vmovaps %xmm11, 144(%r8)
-; AVX2-NEXT:    vmovaps %xmm12, 128(%r8)
-; AVX2-NEXT:    vmovaps %xmm13, 304(%r8)
-; AVX2-NEXT:    vmovaps %xmm14, 288(%r8)
-; AVX2-NEXT:    vmovaps %xmm15, 272(%r8)
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm0[1],xmm6[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; AVX2-NEXT:    vmovaps %xmm0, 16(%r8)
+; AVX2-NEXT:    vmovaps %xmm2, (%r8)
+; AVX2-NEXT:    vmovaps %xmm1, 48(%r8)
+; AVX2-NEXT:    vmovaps %xmm3, 32(%r8)
+; AVX2-NEXT:    vmovaps %xmm4, 400(%r8)
+; AVX2-NEXT:    vmovaps %xmm7, 384(%r8)
+; AVX2-NEXT:    vmovaps %xmm5, 432(%r8)
+; AVX2-NEXT:    vmovaps %xmm8, 416(%r8)
+; AVX2-NEXT:    vmovaps %xmm9, 144(%r8)
+; AVX2-NEXT:    vmovaps %xmm10, 128(%r8)
+; AVX2-NEXT:    vmovaps %xmm11, 176(%r8)
+; AVX2-NEXT:    vmovaps %xmm12, 160(%r8)
+; AVX2-NEXT:    vmovaps %xmm13, 272(%r8)
+; AVX2-NEXT:    vmovaps %xmm14, 256(%r8)
+; AVX2-NEXT:    vmovaps %xmm15, 304(%r8)
 ; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-NEXT:    vmovaps %xmm0, 256(%r8)
+; AVX2-NEXT:    vmovaps %xmm0, 288(%r8)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-NEXT:    vmovaps %ymm0, 448(%r8)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
index 1c2faae8fdfd7..29e10c47383b8 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
@@ -169,92 +169,84 @@ define void @store_i64_stride6_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr
 ; AVX1-LABEL: store_i64_stride6_vf4:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovapd (%rdi), %ymm0
-; AVX1-NEXT:    vmovapd (%rsi), %ymm1
-; AVX1-NEXT:    vmovaps (%rdx), %ymm8
-; AVX1-NEXT:    vmovapd (%r8), %ymm3
-; AVX1-NEXT:    vmovapd (%r9), %ymm4
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
-; AVX1-NEXT:    vmovaps (%rsi), %xmm6
-; AVX1-NEXT:    vmovaps (%rdi), %xmm7
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm7[1],xmm6[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3]
-; AVX1-NEXT:    vmovaps (%rcx), %xmm5
-; AVX1-NEXT:    vinsertf128 $1, (%r9), %ymm5, %ymm9
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm10 = mem[2,3,2,3]
-; AVX1-NEXT:    vbroadcastsd 8(%r8), %ymm11
-; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm4[2,3],ymm1[2,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm10[0],ymm0[2],ymm10[3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm8[0],mem[0],ymm8[2],mem[2]
+; AVX1-NEXT:    vmovaps (%rdi), %ymm0
+; AVX1-NEXT:    vmovaps (%rdx), %ymm1
+; AVX1-NEXT:    vmovaps (%r8), %ymm2
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
 ; AVX1-NEXT:    vmovaps 16(%rdi), %xmm3
 ; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-NEXT:    vmovapd 16(%rdx), %xmm3
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX1-NEXT:    vmovaps 16(%rdx), %xmm3
 ; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],mem[1]
-; AVX1-NEXT:    vbroadcastsd 24(%r8), %ymm8
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm8[2],ymm3[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm7[0],xmm6[0]
-; AVX1-NEXT:    vmovaps (%rdx), %xmm6
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm6[0],xmm5[0]
-; AVX1-NEXT:    vmovaps %xmm5, 16(%rax)
-; AVX1-NEXT:    vmovaps %xmm4, (%rax)
-; AVX1-NEXT:    vmovaps %ymm1, 96(%rax)
-; AVX1-NEXT:    vmovapd %ymm0, 128(%rax)
-; AVX1-NEXT:    vmovaps %ymm9, 64(%rax)
-; AVX1-NEXT:    vmovapd %ymm2, 32(%rax)
-; AVX1-NEXT:    vmovapd %ymm3, 160(%rax)
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX1-NEXT:    vmovaps 16(%r8), %xmm3
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovaps (%rcx), %xmm3
+; AVX1-NEXT:    vmovaps (%rdx), %xmm4
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm4[1],xmm3[1]
+; AVX1-NEXT:    vmovaps (%r9), %xmm6
+; AVX1-NEXT:    vmovaps (%r8), %xmm7
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm6[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm7[0],xmm6[0]
+; AVX1-NEXT:    vmovaps (%rsi), %xmm7
+; AVX1-NEXT:    vmovaps (%rdi), %xmm2
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm7[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
+; AVX1-NEXT:    vmovaps %xmm3, 16(%rax)
+; AVX1-NEXT:    vmovaps %xmm2, (%rax)
+; AVX1-NEXT:    vmovaps %xmm0, 48(%rax)
+; AVX1-NEXT:    vmovaps %xmm6, 32(%rax)
+; AVX1-NEXT:    vmovaps %xmm1, 80(%rax)
+; AVX1-NEXT:    vmovaps %xmm5, 64(%rax)
+; AVX1-NEXT:    vmovaps %ymm10, 128(%rax)
+; AVX1-NEXT:    vmovaps %ymm9, 160(%rax)
+; AVX1-NEXT:    vmovaps %ymm8, 96(%rax)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: store_i64_stride6_vf4:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT:    vmovaps (%rdi), %ymm8
-; AVX2-NEXT:    vmovaps (%rsi), %ymm11
+; AVX2-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-NEXT:    vmovaps (%rsi), %ymm1
 ; AVX2-NEXT:    vmovaps (%rdx), %ymm2
 ; AVX2-NEXT:    vmovaps (%rcx), %ymm3
 ; AVX2-NEXT:    vmovaps (%r8), %ymm4
-; AVX2-NEXT:    vmovaps (%r9), %xmm5
-; AVX2-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm6
-; AVX2-NEXT:    vmovaps (%rcx), %xmm7
-; AVX2-NEXT:    vmovaps (%rdx), %xmm0
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm7[1]
-; AVX2-NEXT:    vbroadcastsd 8(%r8), %ymm10
-; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5],ymm9[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm6[6,7]
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm5 = xmm5[0,0]
-; AVX2-NEXT:    vmovaps (%rsi), %xmm6
-; AVX2-NEXT:    vmovaps (%rdi), %xmm1
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm1[1],xmm6[1]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm4[0,1],ymm10[0,1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm10[0,1],ymm5[2,3],ymm10[4,5,6,7]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm8[0],ymm11[0],ymm8[2],ymm11[2]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm12[2,3],ymm10[2,3]
+; AVX2-NEXT:    vmovaps (%r9), %ymm5
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[2,3],ymm6[2,3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
 ; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
-; AVX2-NEXT:    vbroadcastsd 24(%r8), %ymm3
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm3[2,3]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],mem[6,7]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm8[1],ymm11[1],ymm8[3],ymm11[3]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm4[2,3],ymm3[2,3]
-; AVX2-NEXT:    vbroadcastsd 16(%r9), %ymm4
-; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm6[0]
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm7[0]
-; AVX2-NEXT:    vmovaps %xmm0, 16(%rax)
-; AVX2-NEXT:    vmovaps %xmm1, (%rax)
-; AVX2-NEXT:    vmovaps %ymm10, 96(%rax)
-; AVX2-NEXT:    vmovaps %ymm3, 128(%rax)
-; AVX2-NEXT:    vmovaps %ymm2, 160(%rax)
-; AVX2-NEXT:    vmovaps %ymm5, 32(%rax)
-; AVX2-NEXT:    vmovaps %ymm9, 64(%rax)
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm2[2,3],ymm7[2,3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm1[2,3],ymm0[2,3]
+; AVX2-NEXT:    vmovaps (%rcx), %xmm1
+; AVX2-NEXT:    vmovaps (%rdx), %xmm3
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1]
+; AVX2-NEXT:    vmovaps (%r9), %xmm5
+; AVX2-NEXT:    vmovaps (%r8), %xmm7
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm7[1],xmm5[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm5[0]
+; AVX2-NEXT:    vmovaps (%rsi), %xmm7
+; AVX2-NEXT:    vmovaps (%rdi), %xmm2
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm7[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVX2-NEXT:    vmovaps %xmm1, 16(%rax)
+; AVX2-NEXT:    vmovaps %xmm2, (%rax)
+; AVX2-NEXT:    vmovaps %xmm0, 48(%rax)
+; AVX2-NEXT:    vmovaps %xmm5, 32(%rax)
+; AVX2-NEXT:    vmovaps %xmm6, 80(%rax)
+; AVX2-NEXT:    vmovaps %xmm4, 64(%rax)
+; AVX2-NEXT:    vmovaps %ymm10, 128(%rax)
+; AVX2-NEXT:    vmovaps %ymm9, 160(%rax)
+; AVX2-NEXT:    vmovaps %ymm8, 96(%rax)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -264,22 +256,21 @@ define void @store_i64_stride6_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512-NEXT:    vmovdqa (%r8), %ymm2
-; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
 ; AVX512-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
-; AVX512-NEXT:    vinserti64x4 $1, (%r9), %zmm2, %zmm2
+; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,4,8,12,u,u,1,5>
 ; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
+; AVX512-NEXT:    vinserti64x4 $1, (%r9), %zmm2, %zmm2
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,8,12,6,7]
 ; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm4
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <1,5,u,u,10,14,2,6>
-; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,9,13,4,5,6,7]
-; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [11,15,3,7,11,15,3,7]
-; AVX512-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [10,14,2,3,4,5,11,15]
-; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm0
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <1,5,9,13,u,u,2,6>
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm1, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,10,14,6,7]
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm3, %zmm5
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <2,6,11,15,u,u,3,7>
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,11,15,6,7]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm3, %zmm0
 ; AVX512-NEXT:    vmovdqu64 %zmm0, 128(%rax)
 ; AVX512-NEXT:    vmovdqu64 %zmm5, 64(%rax)
 ; AVX512-NEXT:    vmovdqu64 %zmm4, (%rax)
@@ -417,276 +408,250 @@ define void @store_i64_stride6_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
 ;
 ; AVX1-LABEL: store_i64_stride6_vf8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd (%rdi), %ymm15
-; AVX1-NEXT:    vmovapd 32(%rdi), %ymm12
-; AVX1-NEXT:    vmovapd (%rsi), %ymm9
-; AVX1-NEXT:    vmovapd 32(%rsi), %ymm13
-; AVX1-NEXT:    vmovapd (%r8), %ymm10
-; AVX1-NEXT:    vmovapd 32(%r8), %ymm14
-; AVX1-NEXT:    vmovapd 32(%r9), %ymm2
-; AVX1-NEXT:    vmovaps 48(%rsi), %xmm0
-; AVX1-NEXT:    vmovaps 48(%rdi), %xmm1
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
-; AVX1-NEXT:    vbroadcastsd 48(%rcx), %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX1-NEXT:    vmovaps (%rdi), %ymm4
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT:    vmovaps (%rdx), %ymm5
+; AVX1-NEXT:    vmovaps 32(%rdx), %ymm2
+; AVX1-NEXT:    vmovaps (%r8), %ymm3
+; AVX1-NEXT:    vmovaps 32(%r8), %ymm0
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX1-NEXT:    vmovaps 48(%rdx), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm6[1],mem[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
+; AVX1-NEXT:    vmovaps 48(%r8), %xmm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[2],mem[2]
+; AVX1-NEXT:    vmovaps 48(%rdi), %xmm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX1-NEXT:    vmovaps 16(%rdx), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm6[1],mem[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],mem[1],ymm4[3],mem[3]
+; AVX1-NEXT:    vmovaps 16(%r8), %xmm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm6[0,1,2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm5[0],mem[0],ymm5[2],mem[2]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX1-NEXT:    vmovaps 32(%rcx), %xmm6
+; AVX1-NEXT:    vmovaps 32(%rdx), %xmm7
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm7[1],xmm6[1]
+; AVX1-NEXT:    vmovaps (%r9), %xmm1
+; AVX1-NEXT:    vmovaps 32(%r9), %xmm2
+; AVX1-NEXT:    vmovaps 32(%r8), %xmm3
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm3[1],xmm2[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm10 = xmm3[0],xmm2[0]
 ; AVX1-NEXT:    vmovaps (%rsi), %xmm3
-; AVX1-NEXT:    vmovaps 16(%rsi), %xmm5
-; AVX1-NEXT:    vmovaps 32(%rsi), %xmm6
-; AVX1-NEXT:    vmovaps (%rdi), %xmm4
-; AVX1-NEXT:    vmovaps 16(%rdi), %xmm11
+; AVX1-NEXT:    vmovaps 32(%rsi), %xmm5
 ; AVX1-NEXT:    vmovaps 32(%rdi), %xmm0
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm0[1],xmm6[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm7 = ymm14[0],ymm7[1,2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3]
-; AVX1-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm11[0],ymm5[0],ymm11[2],ymm5[2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
-; AVX1-NEXT:    vbroadcastsd 16(%rcx), %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm5[0,1,2,3,4,5],ymm7[6,7]
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm7 = mem[0,0]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm3[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm10[0],ymm1[1,2,3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm11 = ymm1[0],ymm7[1],ymm1[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm13[2,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm12[1],ymm13[1],ymm12[3],ymm13[3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm14[2,3],ymm7[2,3]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm12 = ymm7[0],ymm1[0],ymm7[2],ymm1[3]
-; AVX1-NEXT:    vmovaps 32(%rcx), %xmm14
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
-; AVX1-NEXT:    vbroadcastsd 40(%r8), %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5],ymm1[6,7]
-; AVX1-NEXT:    vinsertf128 $1, 32(%r9), %ymm14, %ymm7
-; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm1[0,1],ymm7[2,3],ymm1[4,5],ymm7[6,7]
-; AVX1-NEXT:    vmovapd (%r9), %ymm1
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm15[1],ymm9[1],ymm15[3],ymm9[3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm1[2,3],ymm9[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm10[2,3],ymm7[2,3]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm7 = ymm7[0],ymm9[0],ymm7[2],ymm9[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm9 = mem[2,3,2,3]
-; AVX1-NEXT:    vbroadcastsd 8(%r8), %ymm10
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5],ymm9[6,7]
-; AVX1-NEXT:    vmovaps (%rcx), %xmm10
-; AVX1-NEXT:    vinsertf128 $1, (%r9), %ymm10, %ymm15
-; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm15[2,3],ymm9[4,5],ymm15[6,7]
-; AVX1-NEXT:    vmovapd 48(%rdx), %xmm5
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm5[1],mem[1]
-; AVX1-NEXT:    vbroadcastsd 56(%r8), %ymm15
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm15[2],ymm5[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3]
-; AVX1-NEXT:    vmovapd 16(%rdx), %xmm5
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm5[1],mem[1]
-; AVX1-NEXT:    vbroadcastsd 24(%r8), %ymm15
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm15[2],ymm5[3]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm5[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm5[0]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm6[0]
+; AVX1-NEXT:    vmovaps (%r8), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm6[1],xmm1[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm1[0]
+; AVX1-NEXT:    vmovaps (%rdi), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm3[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm6[0],xmm3[0]
+; AVX1-NEXT:    vmovaps (%rcx), %xmm6
+; AVX1-NEXT:    vmovaps (%rdx), %xmm0
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm6[1]
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
-; AVX1-NEXT:    vmovaps 32(%rdx), %xmm5
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm5[0],xmm14[0]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX1-NEXT:    vmovaps (%rdx), %xmm4
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm10[0]
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovaps %xmm4, 16(%rax)
+; AVX1-NEXT:    vmovaps %xmm0, 16(%rax)
 ; AVX1-NEXT:    vmovaps %xmm3, (%rax)
+; AVX1-NEXT:    vmovaps %xmm1, 48(%rax)
+; AVX1-NEXT:    vmovaps %xmm4, 32(%rax)
+; AVX1-NEXT:    vmovaps %xmm7, 80(%rax)
+; AVX1-NEXT:    vmovaps %xmm2, 64(%rax)
 ; AVX1-NEXT:    vmovaps %xmm5, 208(%rax)
-; AVX1-NEXT:    vmovaps %xmm0, 192(%rax)
-; AVX1-NEXT:    vmovaps %ymm9, 64(%rax)
-; AVX1-NEXT:    vmovapd %ymm7, 128(%rax)
-; AVX1-NEXT:    vmovaps %ymm13, 256(%rax)
-; AVX1-NEXT:    vmovapd %ymm12, 320(%rax)
-; AVX1-NEXT:    vmovapd %ymm11, 32(%rax)
-; AVX1-NEXT:    vmovaps %ymm8, 96(%rax)
-; AVX1-NEXT:    vmovapd %ymm1, 160(%rax)
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX1-NEXT:    vmovaps %xmm8, 192(%rax)
+; AVX1-NEXT:    vmovaps %xmm9, 240(%rax)
+; AVX1-NEXT:    vmovaps %xmm10, 224(%rax)
+; AVX1-NEXT:    vmovaps %xmm15, 272(%rax)
+; AVX1-NEXT:    vmovaps %xmm14, 256(%rax)
+; AVX1-NEXT:    vmovaps %ymm13, 96(%rax)
+; AVX1-NEXT:    vmovaps %ymm12, 128(%rax)
+; AVX1-NEXT:    vmovaps %ymm11, 160(%rax)
 ; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-NEXT:    vmovaps %ymm0, 288(%rax)
-; AVX1-NEXT:    vmovapd %ymm2, 352(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 320(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 352(%rax)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: store_i64_stride6_vf8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    pushq %rax
-; AVX2-NEXT:    vmovaps 32(%rdx), %ymm7
-; AVX2-NEXT:    vmovaps (%r8), %ymm11
-; AVX2-NEXT:    vmovaps 32(%r8), %ymm13
-; AVX2-NEXT:    vmovaps (%r9), %xmm8
-; AVX2-NEXT:    vmovaps 32(%r9), %xmm0
-; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
-; AVX2-NEXT:    vmovaps (%rcx), %xmm5
-; AVX2-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT:    vmovaps 32(%rcx), %xmm15
-; AVX2-NEXT:    vmovaps (%rdx), %xmm3
-; AVX2-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT:    vmovaps 32(%rdx), %xmm12
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm12[1],xmm15[1]
-; AVX2-NEXT:    vbroadcastsd 40(%r8), %ymm6
-; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5],ymm2[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm9 = xmm0[0,0]
-; AVX2-NEXT:    vmovaps (%rsi), %xmm4
-; AVX2-NEXT:    vmovaps 32(%rsi), %xmm1
-; AVX2-NEXT:    vmovaps (%rdi), %xmm6
-; AVX2-NEXT:    vmovaps 32(%rdi), %xmm2
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm2[1],xmm1[1]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm13[0,1],ymm10[0,1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm9[2,3],ymm10[4,5,6,7]
+; AVX2-NEXT:    vmovaps (%rdi), %ymm5
+; AVX2-NEXT:    vmovaps 32(%rdi), %ymm2
+; AVX2-NEXT:    vmovaps (%rsi), %ymm6
+; AVX2-NEXT:    vmovaps 32(%rsi), %ymm3
+; AVX2-NEXT:    vmovaps (%rdx), %ymm7
+; AVX2-NEXT:    vmovaps 32(%rdx), %ymm4
+; AVX2-NEXT:    vmovaps (%rcx), %ymm8
+; AVX2-NEXT:    vmovaps 32(%rcx), %ymm9
+; AVX2-NEXT:    vmovaps (%r8), %ymm10
+; AVX2-NEXT:    vmovaps 32(%r8), %ymm1
+; AVX2-NEXT:    vmovaps (%r9), %ymm11
+; AVX2-NEXT:    vmovaps 32(%r9), %ymm12
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm12[1],ymm1[3],ymm12[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm4[1],ymm9[1],ymm4[3],ymm9[3]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3]
 ; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm3[1],xmm5[1]
-; AVX2-NEXT:    vbroadcastsd 8(%r8), %ymm14
-; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm14[4,5],ymm9[6,7]
-; AVX2-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm14
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4,5],ymm14[6,7]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm12[0],ymm1[2],ymm12[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm13[2,3]
 ; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm3 = xmm8[0,0]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm6[1],xmm4[1]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm11[0,1],ymm14[0,1]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm3[2,3],ymm14[4,5,6,7]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm7[1],mem[1],ymm7[3],mem[3]
-; AVX2-NEXT:    vbroadcastsd 56(%r8), %ymm8
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm3[2,3],ymm8[2,3]
-; AVX2-NEXT:    vmovaps 32(%rdi), %ymm8
-; AVX2-NEXT:    vmovaps 32(%rsi), %ymm0
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm10 = ymm8[1],ymm0[1],ymm8[3],ymm0[3]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm13[2,3],ymm10[2,3]
-; AVX2-NEXT:    vbroadcastsd 48(%r9), %ymm13
-; AVX2-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3],ymm10[4,5,6,7]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[2],ymm0[2]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm7[2,3]
-; AVX2-NEXT:    vbroadcastsd 48(%rcx), %ymm7
-; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],ymm7[6,7]
-; AVX2-NEXT:    vmovaps (%rdx), %ymm7
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm7[1],mem[1],ymm7[3],mem[3]
-; AVX2-NEXT:    vbroadcastsd 24(%r8), %ymm13
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm8[2,3],ymm13[2,3]
-; AVX2-NEXT:    vmovaps (%rdi), %ymm13
-; AVX2-NEXT:    vmovaps (%rsi), %ymm0
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm9 = ymm13[1],ymm0[1],ymm13[3],ymm0[3]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm11[2,3],ymm9[2,3]
-; AVX2-NEXT:    vbroadcastsd 16(%r9), %ymm11
-; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm11[2,3],ymm9[4,5,6,7]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm13[0],ymm0[0],ymm13[2],ymm0[2]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm7[2,3]
-; AVX2-NEXT:    vbroadcastsd 16(%rcx), %ymm7
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm12[0],xmm15[0]
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm4[0]
-; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX2-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX2-NEXT:    # xmm6 = xmm6[0],mem[0]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm9[0],ymm4[2],ymm9[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm2[2,3],ymm4[2,3]
+; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm10[1],ymm11[1],ymm10[3],ymm11[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm7[1],ymm8[1],ymm7[3],ymm8[3]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm4[2,3],ymm3[2,3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm5[1],ymm6[1],ymm5[3],ymm6[3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm9 = ymm10[0],ymm11[0],ymm10[2],ymm11[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm9[2,3],ymm4[2,3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm5[2,3],ymm7[2,3]
+; AVX2-NEXT:    vmovaps 32(%rcx), %xmm6
+; AVX2-NEXT:    vmovaps 32(%rdx), %xmm7
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm6[1]
+; AVX2-NEXT:    vmovaps (%r9), %xmm1
+; AVX2-NEXT:    vmovaps 32(%r9), %xmm2
+; AVX2-NEXT:    vmovaps (%r8), %xmm3
+; AVX2-NEXT:    vmovaps 32(%r8), %xmm4
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm11 = xmm4[1],xmm2[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm10 = xmm4[0],xmm2[0]
+; AVX2-NEXT:    vmovaps 32(%rsi), %xmm4
+; AVX2-NEXT:    vmovaps 32(%rdi), %xmm0
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm4[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm4[0]
+; AVX2-NEXT:    vmovaps (%rsi), %xmm4
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm7[0],xmm6[0]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm3[1],xmm1[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm3[0],xmm1[0]
+; AVX2-NEXT:    vmovaps (%rdi), %xmm3
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; AVX2-NEXT:    vmovaps (%rcx), %xmm4
+; AVX2-NEXT:    vmovaps (%rdx), %xmm0
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm4[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
 ; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],mem[6,7]
-; AVX2-NEXT:    vmovaps %xmm6, 16(%rax)
-; AVX2-NEXT:    vmovaps %xmm4, (%rax)
-; AVX2-NEXT:    vmovaps %xmm2, 208(%rax)
-; AVX2-NEXT:    vmovaps %xmm1, 192(%rax)
-; AVX2-NEXT:    vmovaps %ymm0, 96(%rax)
-; AVX2-NEXT:    vmovaps %ymm9, 128(%rax)
-; AVX2-NEXT:    vmovaps %ymm7, 160(%rax)
-; AVX2-NEXT:    vmovaps %ymm3, 288(%rax)
-; AVX2-NEXT:    vmovaps %ymm10, 320(%rax)
-; AVX2-NEXT:    vmovaps %ymm5, 352(%rax)
-; AVX2-NEXT:    vmovaps %ymm14, 32(%rax)
+; AVX2-NEXT:    vmovaps %xmm0, 16(%rax)
+; AVX2-NEXT:    vmovaps %xmm3, (%rax)
+; AVX2-NEXT:    vmovaps %xmm1, 48(%rax)
+; AVX2-NEXT:    vmovaps %xmm5, 32(%rax)
+; AVX2-NEXT:    vmovaps %xmm7, 80(%rax)
+; AVX2-NEXT:    vmovaps %xmm2, 64(%rax)
+; AVX2-NEXT:    vmovaps %xmm6, 208(%rax)
+; AVX2-NEXT:    vmovaps %xmm8, 192(%rax)
+; AVX2-NEXT:    vmovaps %xmm9, 240(%rax)
+; AVX2-NEXT:    vmovaps %xmm10, 224(%rax)
+; AVX2-NEXT:    vmovaps %xmm11, 272(%rax)
+; AVX2-NEXT:    vmovaps %xmm12, 256(%rax)
+; AVX2-NEXT:    vmovaps %ymm13, 96(%rax)
+; AVX2-NEXT:    vmovaps %ymm14, 128(%rax)
+; AVX2-NEXT:    vmovaps %ymm15, 160(%rax)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX2-NEXT:    vmovaps %ymm0, 288(%rax)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX2-NEXT:    vmovaps %ymm0, 320(%rax)
 ; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT:    vmovaps %ymm0, 256(%rax)
-; AVX2-NEXT:    popq %rax
+; AVX2-NEXT:    vmovaps %ymm0, 352(%rax)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: store_i64_stride6_vf8:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm4
+; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm5
 ; AVX512-NEXT:    vmovdqu64 (%rsi), %zmm6
-; AVX512-NEXT:    vmovdqu64 (%rdx), %zmm2
-; AVX512-NEXT:    vmovdqu64 (%rcx), %zmm3
-; AVX512-NEXT:    vmovdqu64 (%r8), %zmm10
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
+; AVX512-NEXT:    vmovdqu64 (%rdx), %zmm3
+; AVX512-NEXT:    vmovdqu64 (%rcx), %zmm4
+; AVX512-NEXT:    vmovdqu64 (%r8), %zmm8
+; AVX512-NEXT:    vmovdqu64 (%r9), %zmm2
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [1,9,2,10,1,9,2,10]
 ; AVX512-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2q %zmm6, %zmm4, %zmm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,4,12>
-; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm5
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm0
+; AVX512-NEXT:    vmovdqa (%r8), %xmm7
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm7 = xmm7[1],mem[1]
+; AVX512-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX512-NEXT:    movb $12, %al
 ; AVX512-NEXT:    kmovd %eax, %k1
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
-; AVX512-NEXT:    movb $16, %al
-; AVX512-NEXT:    kmovd %eax, %k2
-; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k2}
-; AVX512-NEXT:    vmovdqu64 (%r9), %zmm5
+; AVX512-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm0 {%k1}
 ; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [2,10,2,10,2,10,2,10]
 ; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2q %zmm6, %zmm4, %zmm7
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [1,9,2,10,1,9,2,10]
-; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm7
 ; AVX512-NEXT:    movb $48, %al
 ; AVX512-NEXT:    kmovd %eax, %k2
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k2}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,9,u,4,5,6,7>
-; AVX512-NEXT:    vpermi2q %zmm10, %zmm8, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,2,9,4,5,6,7]
-; AVX512-NEXT:    vpermi2q %zmm5, %zmm7, %zmm8
-; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [6,14,6,14,6,14,6,14]
-; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2q %zmm6, %zmm4, %zmm7
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,13,6,14,5,13,6,14]
-; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm9 {%k2}
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,13,u,4,5,6,7>
-; AVX512-NEXT:    vpermi2q %zmm10, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,1,2,13,4,5,6,7]
-; AVX512-NEXT:    vpermi2q %zmm5, %zmm7, %zmm9
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm0 {%k2}
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,8,1,9,0,8,1,9]
 ; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2q %zmm6, %zmm4, %zmm7
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm7
 ; AVX512-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k1}
-; AVX512-NEXT:    vinserti32x4 $2, (%r8), %zmm7, %zmm1
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,8,6,7]
-; AVX512-NEXT:    vpermi2q %zmm5, %zmm1, %zmm7
-; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,7,15,7,15,7,15]
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [0,8,0,8,0,8,0,8]
 ; AVX512-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm1
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm8, %zmm1
+; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [2,10,3,11,2,10,3,11]
+; AVX512-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm8, %zmm1
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm9
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [3,11,3,11,3,11,3,11]
+; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm9
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm1 {%k2}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [7,15,7,15,7,15,7,15]
+; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm9
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [6,14,7,15,6,14,7,15]
+; AVX512-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm8, %zmm10
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,7,15>
-; AVX512-NEXT:    vpermi2q %zmm6, %zmm4, %zmm11
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm11[0,1,2,3],zmm1[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <14,u,2,3,4,5,15,u>
-; AVX512-NEXT:    vpermi2q %zmm10, %zmm1, %zmm4
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,14,2,3,4,5,6,15]
-; AVX512-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
-; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [3,11,3,11,3,11,3,11]
-; AVX512-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
-; AVX512-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <10,u,2,3,4,5,11,u>
-; AVX512-NEXT:    vpermi2q %zmm10, %zmm2, %zmm3
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,10,2,3,4,5,6,11]
-; AVX512-NEXT:    vpermi2q %zmm5, %zmm3, %zmm2
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,12,6,7]
-; AVX512-NEXT:    vpermi2q %zmm5, %zmm0, %zmm3
-; AVX512-NEXT:    vmovdqu64 %zmm3, 192(%r10)
-; AVX512-NEXT:    vmovdqu64 %zmm2, 128(%r10)
-; AVX512-NEXT:    vmovdqu64 %zmm1, 320(%r10)
-; AVX512-NEXT:    vmovdqu64 %zmm9, 256(%r10)
-; AVX512-NEXT:    vmovdqu64 %zmm8, 64(%r10)
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm11
+; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k1}
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k2}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14]
+; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm9
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [5,13,6,14,5,13,6,14]
+; AVX512-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm11
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,5,13>
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm8, %zmm12
+; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm11 {%k1}
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm11 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [4,12,5,13,4,12,5,13]
+; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm9
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,4,12>
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm5
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k2} = zmm8[0],zmm2[0],zmm8[2],zmm2[2],zmm8[4],zmm2[4],zmm8[6],zmm2[6]
+; AVX512-NEXT:    vmovdqu64 %zmm11, 256(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm10, 320(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm1, 128(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm9, 192(%r10)
 ; AVX512-NEXT:    vmovdqu64 %zmm7, (%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm0, 64(%r10)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <8 x i64>, <8 x i64>* %in.vecptr0, align 32

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
index b8bae39a13ca2..06d9cc004403f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
@@ -227,64 +227,64 @@ define void @store_i8_stride3_vf8(<8 x i8>* %in.vecptr0, <8 x i8>* %in.vecptr1,
 define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr1, <16 x i8>* %in.vecptr2, <48 x i8>* %out.vec) nounwind {
 ; SSE-LABEL: store_i8_stride3_vf16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm2
-; SSE-NEXT:    movdqa (%rsi), %xmm4
-; SSE-NEXT:    movdqa (%rdx), %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm2[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,4,6,5]
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
-; SSE-NEXT:    movdqa %xmm0, %xmm5
-; SSE-NEXT:    pandn %xmm3, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[2,1,2,3]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[1,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,4,5,5,6]
+; SSE-NEXT:    movdqa (%rdi), %xmm5
+; SSE-NEXT:    movdqa (%rsi), %xmm1
+; SSE-NEXT:    movdqa (%rdx), %xmm8
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm8[2,1,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
+; SSE-NEXT:    movdqa %xmm0, %xmm4
+; SSE-NEXT:    pandn %xmm2, %xmm4
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm2[0,1,2,3,4,5,5,6]
 ; SSE-NEXT:    pand %xmm0, %xmm6
-; SSE-NEXT:    por %xmm5, %xmm6
-; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
-; SSE-NEXT:    pand %xmm5, %xmm6
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm1[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm5, %xmm3
-; SSE-NEXT:    pandn %xmm7, %xmm3
-; SSE-NEXT:    por %xmm6, %xmm3
-; SSE-NEXT:    movdqa %xmm4, %xmm6
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE-NEXT:    por %xmm4, %xmm6
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
+; SSE-NEXT:    pand %xmm2, %xmm6
+; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm5[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm4[0,1,2,3,4,4,6,5]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    pandn %xmm7, %xmm4
+; SSE-NEXT:    por %xmm6, %xmm4
+; SSE-NEXT:    movdqa %xmm1, %xmm6
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,1,2]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[0,1,0,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm7[0,0,2,1,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
-; SSE-NEXT:    pand %xmm5, %xmm7
-; SSE-NEXT:    pandn %xmm6, %xmm5
-; SSE-NEXT:    por %xmm7, %xmm5
-; SSE-NEXT:    pand %xmm0, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,1,0,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,0,0,0,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,6,6]
 ; SSE-NEXT:    movdqa %xmm0, %xmm7
 ; SSE-NEXT:    pandn %xmm6, %xmm7
-; SSE-NEXT:    por %xmm5, %xmm7
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,2,2,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[1,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    pandn %xmm4, %xmm0
-; SSE-NEXT:    por %xmm2, %xmm0
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,1,0,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,0,2,1,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,6,6]
+; SSE-NEXT:    pand %xmm0, %xmm6
+; SSE-NEXT:    por %xmm7, %xmm6
+; SSE-NEXT:    pand %xmm2, %xmm6
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,0,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    pandn %xmm7, %xmm3
+; SSE-NEXT:    por %xmm6, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[1,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm8[2,3,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[1,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,7,7]
+; SSE-NEXT:    pand %xmm0, %xmm6
+; SSE-NEXT:    pandn %xmm5, %xmm0
+; SSE-NEXT:    por %xmm6, %xmm0
 ; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,6,7]
 ; SSE-NEXT:    pandn %xmm1, %xmm2
 ; SSE-NEXT:    por %xmm0, %xmm2
 ; SSE-NEXT:    movdqa %xmm2, 32(%rcx)
-; SSE-NEXT:    movdqa %xmm7, (%rcx)
-; SSE-NEXT:    movdqa %xmm3, 16(%rcx)
+; SSE-NEXT:    movdqa %xmm3, (%rcx)
+; SSE-NEXT:    movdqa %xmm4, 16(%rcx)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: store_i8_stride3_vf16:
@@ -355,114 +355,114 @@ define void @store_i8_stride3_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr
 ; SSE-NEXT:    movdqa 16(%rsi), %xmm7
 ; SSE-NEXT:    movdqa (%rdx), %xmm8
 ; SSE-NEXT:    movdqa 16(%rdx), %xmm10
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm11[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    pandn %xmm1, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,1,2,3]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[1,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,6]
-; SSE-NEXT:    pand %xmm0, %xmm1
-; SSE-NEXT:    por %xmm2, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
-; SSE-NEXT:    pand %xmm5, %xmm1
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm10[2,1,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm5, %xmm12
-; SSE-NEXT:    pandn %xmm2, %xmm12
-; SSE-NEXT:    por %xmm1, %xmm12
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm9[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,5]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    pandn %xmm1, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm13[2,1,2,3]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[1,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,6]
-; SSE-NEXT:    pand %xmm0, %xmm1
-; SSE-NEXT:    por %xmm2, %xmm1
-; SSE-NEXT:    pand %xmm5, %xmm1
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm10[2,1,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    pandn %xmm2, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm7[2,1,2,3]
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,6]
+; SSE-NEXT:    pand %xmm0, %xmm2
+; SSE-NEXT:    por %xmm3, %xmm2
+; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
+; SSE-NEXT:    pand %xmm4, %xmm2
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm11[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,4,4,6,5]
+; SSE-NEXT:    movdqa %xmm4, %xmm12
+; SSE-NEXT:    pandn %xmm6, %xmm12
+; SSE-NEXT:    por %xmm2, %xmm12
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm8[2,1,3,3,4,5,6,7]
 ; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm5, %xmm6
+; SSE-NEXT:    movdqa %xmm0, %xmm6
 ; SSE-NEXT:    pandn %xmm2, %xmm6
-; SSE-NEXT:    por %xmm1, %xmm6
-; SSE-NEXT:    movdqa %xmm7, %xmm1
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,6,7]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    pandn %xmm1, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm13[2,1,2,3]
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,6]
+; SSE-NEXT:    pand %xmm0, %xmm2
+; SSE-NEXT:    por %xmm6, %xmm2
+; SSE-NEXT:    pand %xmm4, %xmm2
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm9[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,4,4,6,5]
+; SSE-NEXT:    movdqa %xmm4, %xmm6
+; SSE-NEXT:    pandn %xmm1, %xmm6
+; SSE-NEXT:    por %xmm2, %xmm6
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm11[2,3,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,7,7,7,7]
-; SSE-NEXT:    pand %xmm0, %xmm3
-; SSE-NEXT:    por %xmm2, %xmm3
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0]
-; SSE-NEXT:    pand %xmm2, %xmm3
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pandn %xmm1, %xmm2
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm10[2,3,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,6,5,7,7]
-; SSE-NEXT:    movdqa %xmm2, %xmm1
-; SSE-NEXT:    pandn %xmm4, %xmm1
-; SSE-NEXT:    por %xmm3, %xmm1
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    por %xmm2, %xmm1
+; SSE-NEXT:    pand %xmm4, %xmm1
+; SSE-NEXT:    movdqa %xmm7, %xmm2
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,2,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,5,6,6,7]
+; SSE-NEXT:    movdqa %xmm4, %xmm2
+; SSE-NEXT:    pandn %xmm3, %xmm2
+; SSE-NEXT:    por %xmm1, %xmm2
 ; SSE-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm7[0,1,1,2]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,2,4,5,6,7]
-; SSE-NEXT:    movdqa %xmm5, %xmm4
-; SSE-NEXT:    pandn %xmm3, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm11[0,1,0,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,0,2,1,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,6]
-; SSE-NEXT:    pand %xmm5, %xmm3
-; SSE-NEXT:    por %xmm4, %xmm3
-; SSE-NEXT:    pand %xmm0, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm10[0,1,0,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,0,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6]
-; SSE-NEXT:    movdqa %xmm0, %xmm7
-; SSE-NEXT:    pandn %xmm4, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[0,1,1,2]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,2,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    pandn %xmm1, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm11[0,1,0,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,1,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,5,5,6,6]
+; SSE-NEXT:    pand %xmm0, %xmm7
 ; SSE-NEXT:    por %xmm3, %xmm7
-; SSE-NEXT:    movdqa %xmm13, %xmm3
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm13[8],xmm3[9],xmm13[9],xmm3[10],xmm13[10],xmm3[11],xmm13[11],xmm3[12],xmm13[12],xmm3[13],xmm13[13],xmm3[14],xmm13[14],xmm3[15],xmm13[15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,2,2,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,6,7]
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    pandn %xmm3, %xmm4
+; SSE-NEXT:    pand %xmm4, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm10[0,1,0,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,5,5,6,6]
+; SSE-NEXT:    movdqa %xmm4, %xmm1
+; SSE-NEXT:    pandn %xmm3, %xmm1
+; SSE-NEXT:    por %xmm7, %xmm1
 ; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm9[2,3,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
 ; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,7,7,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm7
+; SSE-NEXT:    pandn %xmm3, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm8[2,3,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,7,7]
 ; SSE-NEXT:    pand %xmm0, %xmm3
-; SSE-NEXT:    por %xmm4, %xmm3
-; SSE-NEXT:    pand %xmm2, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm8[2,3,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[1,1,2,2,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,7,7]
-; SSE-NEXT:    pandn %xmm4, %xmm2
-; SSE-NEXT:    por %xmm3, %xmm2
+; SSE-NEXT:    por %xmm7, %xmm3
+; SSE-NEXT:    pand %xmm4, %xmm3
+; SSE-NEXT:    movdqa %xmm13, %xmm7
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[1,2,2,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,6,6,7]
+; SSE-NEXT:    movdqa %xmm4, %xmm5
+; SSE-NEXT:    pandn %xmm7, %xmm5
+; SSE-NEXT:    por %xmm3, %xmm5
 ; SSE-NEXT:    punpcklbw {{.*#+}} xmm13 = xmm13[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm13[0,1,1,2]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[0,1,0,1]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,0,2,1,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6]
-; SSE-NEXT:    pand %xmm5, %xmm4
-; SSE-NEXT:    pandn %xmm3, %xmm5
-; SSE-NEXT:    por %xmm4, %xmm5
-; SSE-NEXT:    pand %xmm0, %xmm5
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm9[0,1,0,1]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm7[0,0,2,1,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
+; SSE-NEXT:    pand %xmm0, %xmm7
+; SSE-NEXT:    pandn %xmm3, %xmm0
+; SSE-NEXT:    por %xmm7, %xmm0
+; SSE-NEXT:    pand %xmm4, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm8[0,1,0,1]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
 ; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,6]
-; SSE-NEXT:    pandn %xmm3, %xmm0
-; SSE-NEXT:    por %xmm5, %xmm0
-; SSE-NEXT:    movdqa %xmm0, (%rcx)
-; SSE-NEXT:    movdqa %xmm2, 32(%rcx)
-; SSE-NEXT:    movdqa %xmm7, 48(%rcx)
-; SSE-NEXT:    movdqa %xmm1, 80(%rcx)
+; SSE-NEXT:    pandn %xmm3, %xmm4
+; SSE-NEXT:    por %xmm0, %xmm4
+; SSE-NEXT:    movdqa %xmm4, (%rcx)
+; SSE-NEXT:    movdqa %xmm5, 32(%rcx)
+; SSE-NEXT:    movdqa %xmm1, 48(%rcx)
+; SSE-NEXT:    movdqa %xmm2, 80(%rcx)
 ; SSE-NEXT:    movdqa %xmm6, 16(%rcx)
 ; SSE-NEXT:    movdqa %xmm12, 64(%rcx)
 ; SSE-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
index 7270051c02817..62105d77c1071 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
@@ -139,15 +139,41 @@ define void @store_i8_stride4_vf8(<8 x i8>* %in.vecptr0, <8 x i8>* %in.vecptr1,
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE-NEXT:    movq {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT:    pxor %xmm5, %xmm5
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE-NEXT:    movdqa %xmm3, %xmm2
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[2,2,3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[0,0,1,1]
+; SSE-NEXT:    packuswb %xmm6, %xmm7
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
+; SSE-NEXT:    movdqa %xmm2, %xmm6
+; SSE-NEXT:    pandn %xmm7, %xmm6
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT:    movdqa %xmm0, %xmm5
+; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[2,1,3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,1,1,3]
+; SSE-NEXT:    packuswb %xmm7, %xmm5
+; SSE-NEXT:    pand %xmm2, %xmm5
+; SSE-NEXT:    por %xmm6, %xmm5
+; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[2,2,3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-NEXT:    packuswb %xmm4, %xmm3
 ; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT:    movdqa %xmm0, 16(%r8)
-; SSE-NEXT:    movdqa %xmm2, (%r8)
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSE-NEXT:    packuswb %xmm1, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pandn %xmm3, %xmm2
+; SSE-NEXT:    por %xmm0, %xmm2
+; SSE-NEXT:    movdqa %xmm2, 16(%r8)
+; SSE-NEXT:    movdqa %xmm5, (%r8)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: store_i8_stride4_vf8:

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 39dc9621c05dd..8f09a2fdc769d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -139,39 +139,46 @@ define void @store_i8_stride6_vf4(<4 x i8>* %in.vecptr0, <4 x i8>* %in.vecptr1,
 ; SSE-NEXT:    movdqa (%r8), %xmm0
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
-; SSE-NEXT:    pxor %xmm3, %xmm3
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,2,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,0,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,2,1,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,6]
-; SSE-NEXT:    packuswb %xmm5, %xmm4
-; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,2,2,0]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,7,5]
-; SSE-NEXT:    packuswb %xmm3, %xmm6
-; SSE-NEXT:    pand %xmm5, %xmm6
-; SSE-NEXT:    pandn %xmm4, %xmm5
-; SSE-NEXT:    por %xmm6, %xmm5
+; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE-NEXT:    pxor %xmm4, %xmm4
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,2,1,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,5,4,6]
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[0,1,1,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,6,7]
+; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[2,0],xmm5[3,0]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm6[0,2]
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[0,2,2,0]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm5[0,2,2,3,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[0,2,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm7 = xmm7[0,1,0,2,4,5,6,7]
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1],xmm6[0,3]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,5]
+; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[2,3]
+; SSE-NEXT:    packuswb %xmm3, %xmm7
 ; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT:    pand %xmm3, %xmm5
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,0,0]
-; SSE-NEXT:    pandn %xmm4, %xmm3
-; SSE-NEXT:    por %xmm5, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,7,6,7]
+; SSE-NEXT:    pand %xmm3, %xmm7
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,1,2,0]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,4,6,7]
+; SSE-NEXT:    packuswb %xmm4, %xmm5
+; SSE-NEXT:    pandn %xmm5, %xmm3
+; SSE-NEXT:    por %xmm7, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
-; SSE-NEXT:    packuswb %xmm2, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE-NEXT:    packuswb %xmm1, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,0,0,65535,65535,65535,65535,65535]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,1,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,7]
+; SSE-NEXT:    packuswb %xmm0, %xmm0
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    pandn %xmm1, %xmm2
 ; SSE-NEXT:    por %xmm0, %xmm2
@@ -287,63 +294,54 @@ define void @store_i8_stride6_vf8(<8 x i8>* %in.vecptr0, <8 x i8>* %in.vecptr1,
 ; SSE-LABEL: store_i8_stride6_vf8:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    movq {{.*#+}} xmm9 = mem[0],zero
-; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3],xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
-; SSE-NEXT:    movq {{.*#+}} xmm10 = mem[0],zero
 ; SSE-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3],xmm10[4],xmm2[4],xmm10[5],xmm2[5],xmm10[6],xmm2[6],xmm10[7],xmm2[7]
-; SSE-NEXT:    movq {{.*#+}} xmm8 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE-NEXT:    movq {{.*#+}} xmm4 = mem[0],zero
-; SSE-NEXT:    pxor %xmm5, %xmm5
-; SSE-NEXT:    movdqa %xmm8, %xmm3
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE-NEXT:    movdqa %xmm3, %xmm5
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE-NEXT:    packuswb %xmm5, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,0,2,3]
-; SSE-NEXT:    movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT:    movdqa %xmm6, %xmm7
-; SSE-NEXT:    pandn %xmm5, %xmm7
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm9[0,0,1,1]
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
 ; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT:    pand %xmm1, %xmm5
-; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm10[1,0,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; SSE-NEXT:    movdqa %xmm1, %xmm2
-; SSE-NEXT:    pandn %xmm0, %xmm2
-; SSE-NEXT:    por %xmm5, %xmm2
-; SSE-NEXT:    pand %xmm6, %xmm2
-; SSE-NEXT:    por %xmm7, %xmm2
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE-NEXT:    packuswb %xmm3, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,1,3,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm9[3,3,3,3]
-; SSE-NEXT:    movdqa %xmm6, %xmm4
-; SSE-NEXT:    pandn %xmm3, %xmm4
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,5,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,2,2,3]
-; SSE-NEXT:    pand %xmm6, %xmm3
-; SSE-NEXT:    por %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm1, %xmm3
-; SSE-NEXT:    pandn %xmm0, %xmm1
-; SSE-NEXT:    por %xmm3, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[1,1,2,2]
-; SSE-NEXT:    pand %xmm6, %xmm0
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm10[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pandn %xmm3, %xmm6
-; SSE-NEXT:    por %xmm0, %xmm6
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [0,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT:    pand %xmm0, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm8[1,1,2,2]
-; SSE-NEXT:    pandn %xmm3, %xmm0
-; SSE-NEXT:    por %xmm6, %xmm0
-; SSE-NEXT:    movdqa %xmm1, 32(%rax)
-; SSE-NEXT:    movdqa %xmm0, 16(%rax)
-; SSE-NEXT:    movdqa %xmm2, (%rax)
+; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm0[1,0,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm1, %xmm6
+; SSE-NEXT:    pandn %xmm5, %xmm6
+; SSE-NEXT:    por %xmm3, %xmm6
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT:    pand %xmm3, %xmm6
+; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm3, %xmm5
+; SSE-NEXT:    pandn %xmm7, %xmm5
+; SSE-NEXT:    por %xmm6, %xmm5
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[2,2,3,3]
+; SSE-NEXT:    movdqa %xmm1, %xmm7
+; SSE-NEXT:    pandn %xmm6, %xmm7
+; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm0[0,1,2,3,5,6,7,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,2,2,3]
+; SSE-NEXT:    pand %xmm1, %xmm6
+; SSE-NEXT:    por %xmm7, %xmm6
+; SSE-NEXT:    pand %xmm3, %xmm6
+; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm2[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm3, %xmm7
+; SSE-NEXT:    pandn %xmm8, %xmm7
+; SSE-NEXT:    por %xmm6, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,2,2]
+; SSE-NEXT:    pand %xmm1, %xmm4
+; SSE-NEXT:    pandn %xmm2, %xmm1
+; SSE-NEXT:    por %xmm4, %xmm1
+; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pandn %xmm0, %xmm3
+; SSE-NEXT:    por %xmm1, %xmm3
+; SSE-NEXT:    movdqa %xmm3, 16(%rax)
+; SSE-NEXT:    movdqa %xmm7, 32(%rax)
+; SSE-NEXT:    movdqa %xmm5, (%rax)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: store_i8_stride6_vf8:
@@ -498,200 +496,156 @@ define void @store_i8_stride6_vf8(<8 x i8>* %in.vecptr0, <8 x i8>* %in.vecptr1,
 define void @store_i8_stride6_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr1, <16 x i8>* %in.vecptr2, <16 x i8>* %in.vecptr3, <16 x i8>* %in.vecptr4, <16 x i8>* %in.vecptr5, <96 x i8>* %out.vec) nounwind {
 ; SSE-LABEL: store_i8_stride6_vf16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm9
-; SSE-NEXT:    movdqa (%rsi), %xmm5
-; SSE-NEXT:    movdqa (%rdx), %xmm11
-; SSE-NEXT:    movdqa (%rcx), %xmm14
-; SSE-NEXT:    movdqa (%r8), %xmm15
-; SSE-NEXT:    movdqa (%r9), %xmm4
-; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm9, %xmm1
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,0,1,1]
-; SSE-NEXT:    movdqa %xmm1, %xmm10
-; SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa {{.*#+}} xmm7 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT:    pand %xmm7, %xmm0
-; SSE-NEXT:    movdqa %xmm11, %xmm2
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[1,0,2,2,4,5,6,7]
-; SSE-NEXT:    movdqa %xmm2, %xmm6
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; SSE-NEXT:    movdqa %xmm7, %xmm2
-; SSE-NEXT:    pandn %xmm1, %xmm2
-; SSE-NEXT:    por %xmm0, %xmm2
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    movdqa %xmm15, %xmm12
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm12 = xmm12[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm12[0,0,0,0]
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT:    movdqa (%rdi), %xmm6
+; SSE-NEXT:    movdqa (%rsi), %xmm10
+; SSE-NEXT:    movdqa (%rdx), %xmm5
+; SSE-NEXT:    movdqa (%rcx), %xmm12
+; SSE-NEXT:    movdqa (%r8), %xmm7
+; SSE-NEXT:    movdqa (%r9), %xmm14
+; SSE-NEXT:    movdqa %xmm6, %xmm11
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm11[0,0,1,1]
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm5, %xmm8
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1],xmm8[2],xmm12[2],xmm8[3],xmm12[3],xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm8[1,0,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
 ; SSE-NEXT:    movdqa %xmm0, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm3
-; SSE-NEXT:    por %xmm2, %xmm3
-; SSE-NEXT:    movdqa {{.*#+}} xmm8 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT:    pand %xmm8, %xmm3
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm8, %xmm2
-; SSE-NEXT:    pandn %xmm1, %xmm2
-; SSE-NEXT:    por %xmm3, %xmm2
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm10[3,3,3,3]
+; SSE-NEXT:    pandn %xmm2, %xmm3
+; SSE-NEXT:    por %xmm1, %xmm3
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT:    pand %xmm2, %xmm3
+; SSE-NEXT:    movdqa %xmm7, %xmm13
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3],xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm13[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm2, %xmm9
+; SSE-NEXT:    pandn %xmm1, %xmm9
+; SSE-NEXT:    por %xmm3, %xmm9
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm13[2,2,3,3]
 ; SSE-NEXT:    movdqa %xmm0, %xmm3
 ; SSE-NEXT:    pandn %xmm1, %xmm3
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,5,6,7,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,6,7,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3]
 ; SSE-NEXT:    pand %xmm0, %xmm1
 ; SSE-NEXT:    por %xmm3, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm12[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm7, %xmm6
-; SSE-NEXT:    pandn %xmm3, %xmm6
-; SSE-NEXT:    pand %xmm7, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm6
-; SSE-NEXT:    movdqa {{.*#+}} xmm13 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm13, %xmm10
-; SSE-NEXT:    pandn %xmm3, %xmm10
-; SSE-NEXT:    pand %xmm13, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm10
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm14[8],xmm11[9],xmm14[9],xmm11[10],xmm14[10],xmm11[11],xmm14[11],xmm11[12],xmm14[12],xmm11[13],xmm14[13],xmm11[14],xmm14[14],xmm11[15],xmm14[15]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm11[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm11[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm15
+; SSE-NEXT:    pandn %xmm4, %xmm15
+; SSE-NEXT:    por %xmm1, %xmm15
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[2,2,3,3]
+; SSE-NEXT:    movdqa %xmm0, %xmm4
+; SSE-NEXT:    pandn %xmm1, %xmm4
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm12[8],xmm5[9],xmm12[9],xmm5[10],xmm12[10],xmm5[11],xmm12[11],xmm5[12],xmm12[12],xmm5[13],xmm12[13],xmm5[14],xmm12[14],xmm5[15],xmm12[15]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,5,6,7,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3]
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    por %xmm4, %xmm1
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm10[8],xmm6[9],xmm10[9],xmm6[10],xmm10[10],xmm6[11],xmm10[11],xmm6[12],xmm10[12],xmm6[13],xmm10[13],xmm6[14],xmm10[14],xmm6[15],xmm10[15]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    pandn %xmm3, %xmm4
+; SSE-NEXT:    por %xmm1, %xmm4
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm6[0,0,1,1]
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm5[1,0,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm10 = xmm3[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    pandn %xmm10, %xmm3
+; SSE-NEXT:    por %xmm1, %xmm3
+; SSE-NEXT:    pand %xmm2, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm10 = xmm7[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm2, %xmm1
+; SSE-NEXT:    pandn %xmm10, %xmm1
+; SSE-NEXT:    por %xmm3, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[1,1,2,2]
 ; SSE-NEXT:    movdqa %xmm0, %xmm6
 ; SSE-NEXT:    pandn %xmm3, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm9[1,1,2,2]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
 ; SSE-NEXT:    pand %xmm0, %xmm3
-; SSE-NEXT:    por %xmm3, %xmm6
-; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm15 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm15[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm1, %xmm5
-; SSE-NEXT:    pandn %xmm3, %xmm5
-; SSE-NEXT:    pand %xmm1, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm5
-; SSE-NEXT:    punpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm3 = xmm3[8],mem[8],xmm3[9],mem[9],xmm3[10],mem[10],xmm3[11],mem[11],xmm3[12],mem[12],xmm3[13],mem[13],xmm3[14],mem[14],xmm3[15],mem[15]
-; SSE-NEXT:    movdqa {{.*#+}} xmm14 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm14, %xmm2
-; SSE-NEXT:    pandn %xmm6, %xmm2
-; SSE-NEXT:    pand %xmm14, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm9[3,3,3,3]
-; SSE-NEXT:    movdqa %xmm0, %xmm6
+; SSE-NEXT:    por %xmm6, %xmm3
+; SSE-NEXT:    pand %xmm2, %xmm3
+; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm2, %xmm6
 ; SSE-NEXT:    pandn %xmm5, %xmm6
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm11[0,1,2,3,5,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,2,2,3]
+; SSE-NEXT:    por %xmm3, %xmm6
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm11[1,1,2,2]
+; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm13[1,1,2,2]
 ; SSE-NEXT:    pand %xmm0, %xmm5
-; SSE-NEXT:    por %xmm6, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm15[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm7, %xmm4
-; SSE-NEXT:    pandn %xmm6, %xmm4
-; SSE-NEXT:    pand %xmm7, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm4
-; SSE-NEXT:    pand %xmm13, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[2,2,3,3]
-; SSE-NEXT:    pandn %xmm5, %xmm13
-; SSE-NEXT:    por %xmm4, %xmm13
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[0,0,1,1]
-; SSE-NEXT:    pand %xmm7, %xmm4
-; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm11[1,0,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,1,0,1]
-; SSE-NEXT:    pandn %xmm5, %xmm7
-; SSE-NEXT:    por %xmm4, %xmm7
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm15[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm0, %xmm5
-; SSE-NEXT:    pandn %xmm4, %xmm5
-; SSE-NEXT:    pand %xmm0, %xmm7
-; SSE-NEXT:    por %xmm7, %xmm5
-; SSE-NEXT:    pand %xmm8, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
-; SSE-NEXT:    pandn %xmm3, %xmm8
-; SSE-NEXT:    por %xmm5, %xmm8
-; SSE-NEXT:    pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm3 = mem[1,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm3
-; SSE-NEXT:    pshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm4 = mem[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    pandn %xmm4, %xmm0
-; SSE-NEXT:    por %xmm3, %xmm0
-; SSE-NEXT:    pand %xmm1, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm12[1,1,2,2]
-; SSE-NEXT:    pandn %xmm3, %xmm1
-; SSE-NEXT:    por %xmm0, %xmm1
-; SSE-NEXT:    pand %xmm14, %xmm1
-; SSE-NEXT:    pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm0 = mem[1,1,2,2]
-; SSE-NEXT:    pandn %xmm0, %xmm14
-; SSE-NEXT:    por %xmm1, %xmm14
-; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    movdqa %xmm14, 16(%rax)
-; SSE-NEXT:    movdqa %xmm8, 48(%rax)
-; SSE-NEXT:    movdqa %xmm13, 80(%rax)
-; SSE-NEXT:    movdqa %xmm2, 64(%rax)
-; SSE-NEXT:    movdqa %xmm10, 32(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, (%rax)
+; SSE-NEXT:    pandn %xmm3, %xmm0
+; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm8[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pandn %xmm3, %xmm2
+; SSE-NEXT:    por %xmm0, %xmm2
+; SSE-NEXT:    movdqa %xmm2, 16(%rax)
+; SSE-NEXT:    movdqa %xmm6, 64(%rax)
+; SSE-NEXT:    movdqa %xmm1, 48(%rax)
+; SSE-NEXT:    movdqa %xmm4, 80(%rax)
+; SSE-NEXT:    movdqa %xmm15, 32(%rax)
+; SSE-NEXT:    movdqa %xmm9, (%rax)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: store_i8_stride6_vf16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm10
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm3
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm8
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm9
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm10
 ; AVX1-NEXT:    vmovdqa (%rcx), %xmm4
-; AVX1-NEXT:    vmovdqa (%r8), %xmm8
-; AVX1-NEXT:    vmovdqa (%r9), %xmm9
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3],xmm10[4],xmm2[4],xmm10[5],xmm2[5],xmm10[6],xmm2[6],xmm10[7],xmm2[7]
+; AVX1-NEXT:    vmovdqa (%r8), %xmm5
+; AVX1-NEXT:    vmovdqa (%r9), %xmm6
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm7[1,1,2,2]
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2],xmm0[3,4],xmm6[5],xmm0[6,7]
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm11 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[0,0,1,1]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm5[1,0,2,2,4,5,6,7]
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm1[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7]
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm11 = xmm3[0],xmm0[1,2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm7[0,0,1,1]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[1,0,2,2,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4],xmm1[5,6],xmm0[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[0,0,0,0]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3],xmm0[4],xmm3[5,6],xmm0[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm11
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm10[8],xmm2[8],xmm10[9],xmm2[9],xmm10[10],xmm2[10],xmm10[11],xmm2[11],xmm10[12],xmm2[12],xmm10[13],xmm2[13],xmm10[14],xmm2[14],xmm10[15],xmm2[15]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[1,0,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6],xmm4[7]
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm4[0,0,0,0]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,0,0,0]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm4[1,0,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3,4],xmm5[5],xmm6[6,7]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm6[0,0,1,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1,2],xmm0[3],xmm5[4,5],xmm0[6],xmm5[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[3,3,3,3]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,6,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3,4],xmm2[5],xmm5[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm6[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3],xmm5[4],xmm2[5,6],xmm5[7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,5,6,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3,4],xmm2[5],xmm5[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[2,2,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3],xmm5[4],xmm2[5,6],xmm5[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[3,3,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,2,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,5,6,7,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,2,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6],xmm2[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[3,3,3,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3,4],xmm3[5],xmm1[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[1,1,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1,2],xmm3[3],xmm1[4,5],xmm3[6],xmm1[7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4],xmm3[5],xmm2[6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT:    vmovaps %ymm1, 64(%rax)
 ; AVX1-NEXT:    vmovaps %ymm0, 32(%rax)
 ; AVX1-NEXT:    vmovaps %ymm11, (%rax)
@@ -704,36 +658,34 @@ define void @store_i8_stride6_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm2
-; AVX2-SLOW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-SLOW-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8],zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,ymm3[2,10],zero,zero,zero,zero,ymm3[19,27],zero,zero,zero,zero,ymm3[20,28],zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,8],zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,ymm4[2,10],zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,ymm4[20,28],zero,zero,zero,zero,ymm4[21,29]
 ; AVX2-SLOW-NEXT:    vpor %ymm3, %ymm4, %ymm3
+; AVX2-SLOW-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,8,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,19,27,u,u,u,u,20,28,u,u]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
 ; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,ymm4[7,15],zero,zero,zero,zero,ymm4[16,24],zero,zero,zero,zero,ymm4[17,25],zero,zero,zero,zero,ymm4[18,26],zero,zero
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[5,13],zero,zero,zero,zero,ymm5[6,14],zero,zero,zero,zero,ymm5[7,15],zero,zero,zero,zero,ymm5[16,24],zero,zero,zero,zero,ymm5[17,25],zero,zero,zero,zero,ymm5[18,26]
-; AVX2-SLOW-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,5,13,u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm4, %ymm5, %ymm4
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[3,11],zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,ymm1[21,29],zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,ymm1[23,31],zero,zero
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[5,13],zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,ymm4[7,15],zero,zero,zero,zero,ymm4[16,24],zero,zero,zero,zero,ymm4[17,25],zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm1[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[5,13],zero,zero,zero,zero,ymm6[6,14],zero,zero,zero,zero,ymm6[7,15],zero,zero,zero,zero,ymm6[16,24],zero,zero,zero,zero,ymm6[17,25],zero,zero,zero,zero,ymm6[18,26]
+; AVX2-SLOW-NEXT:    vpor %ymm4, %ymm6, %ymm4
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u,18,26,u,u]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm4, %ymm6, %ymm4
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[3,11],zero,zero,zero,zero,ymm0[4,12],zero,zero,zero,zero,ymm0[5,13],zero,zero,zero,zero,ymm0[22,30],zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpor %ymm1, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[2,10,u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[2,10],zero,zero,zero,zero,ymm2[3,11],zero,zero,zero,zero,ymm2[4,12],zero,zero,zero,zero,ymm2[21,29],zero,zero,zero,zero,ymm2[22,30],zero,zero,zero,zero,ymm2[23,31]
+; AVX2-SLOW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31,u,u]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 64(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 32(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm3, (%rax)
@@ -746,36 +698,34 @@ define void @store_i8_stride6_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
 ; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa (%r8), %xmm2
-; AVX2-FAST-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-FAST-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8],zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,ymm3[2,10],zero,zero,zero,zero,ymm3[19,27],zero,zero,zero,zero,ymm3[20,28],zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,8],zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,ymm4[2,10],zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,ymm4[20,28],zero,zero,zero,zero,ymm4[21,29]
 ; AVX2-FAST-NEXT:    vpor %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,8,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,19,27,u,u,u,u,20,28,u,u]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
 ; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,ymm4[7,15],zero,zero,zero,zero,ymm4[16,24],zero,zero,zero,zero,ymm4[17,25],zero,zero,zero,zero,ymm4[18,26],zero,zero
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[0,2,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[5,13],zero,zero,zero,zero,ymm5[6,14],zero,zero,zero,zero,ymm5[7,15],zero,zero,zero,zero,ymm5[16,24],zero,zero,zero,zero,ymm5[17,25],zero,zero,zero,zero,ymm5[18,26]
-; AVX2-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,5,13,u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm4, %ymm5, %ymm4
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[3,11],zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,ymm1[21,29],zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,ymm1[23,31],zero,zero
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[5,13],zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,ymm4[7,15],zero,zero,zero,zero,ymm4[16,24],zero,zero,zero,zero,ymm4[17,25],zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm1[0,2,1,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[5,13],zero,zero,zero,zero,ymm6[6,14],zero,zero,zero,zero,ymm6[7,15],zero,zero,zero,zero,ymm6[16,24],zero,zero,zero,zero,ymm6[17,25],zero,zero,zero,zero,ymm6[18,26]
+; AVX2-FAST-NEXT:    vpor %ymm4, %ymm6, %ymm4
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[0,2,1,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u,18,26,u,u]
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm4, %ymm6, %ymm4
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[3,11],zero,zero,zero,zero,ymm0[4,12],zero,zero,zero,zero,ymm0[5,13],zero,zero,zero,zero,ymm0[22,30],zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero
-; AVX2-FAST-NEXT:    vpor %ymm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[2,10,u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[2,10],zero,zero,zero,zero,ymm2[3,11],zero,zero,zero,zero,ymm2[4,12],zero,zero,zero,zero,ymm2[21,29],zero,zero,zero,zero,ymm2[22,30],zero,zero,zero,zero,ymm2[23,31]
+; AVX2-FAST-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31,u,u]
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqa %ymm0, 64(%rax)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm4, 32(%rax)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm3, (%rax)
@@ -850,282 +800,218 @@ define void @store_i8_stride6_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr
 ; SSE-LABEL: store_i8_stride6_vf32:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    subq $56, %rsp
-; SSE-NEXT:    movdqa 16(%rdi), %xmm11
-; SSE-NEXT:    movdqa 16(%rsi), %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 16(%rdx), %xmm8
-; SSE-NEXT:    movdqa 16(%rcx), %xmm5
-; SSE-NEXT:    movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 16(%r8), %xmm13
-; SSE-NEXT:    movdqa 16(%r9), %xmm3
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm11, %xmm10
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm10[3,3,3,3]
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT:    movdqa 16(%rdi), %xmm10
+; SSE-NEXT:    movdqa 16(%rsi), %xmm8
+; SSE-NEXT:    movdqa 16(%rdx), %xmm9
+; SSE-NEXT:    movdqa 16(%rcx), %xmm4
+; SSE-NEXT:    movdqa (%r8), %xmm13
+; SSE-NEXT:    movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 16(%r8), %xmm12
+; SSE-NEXT:    movdqa 16(%r9), %xmm5
+; SSE-NEXT:    movdqa %xmm12, %xmm0
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT:    movdqa %xmm3, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    movdqa %xmm9, %xmm6
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,5,6,7,7]
+; SSE-NEXT:    movdqa %xmm6, %xmm7
+; SSE-NEXT:    movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3]
+; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    por %xmm0, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm15 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT:    pand %xmm15, %xmm1
+; SSE-NEXT:    movdqa %xmm10, %xmm11
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm11[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm15, %xmm0
+; SSE-NEXT:    pandn %xmm6, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm11[0,0,1,1]
+; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm7[1,0,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm3, %xmm7
+; SSE-NEXT:    pandn %xmm6, %xmm7
+; SSE-NEXT:    por %xmm1, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm15, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    movdqa (%r9), %xmm6
+; SSE-NEXT:    movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pand %xmm15, %xmm7
+; SSE-NEXT:    por %xmm7, %xmm0
+; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm12[2,2,3,3]
+; SSE-NEXT:    movdqa %xmm3, %xmm5
+; SSE-NEXT:    pandn %xmm1, %xmm5
+; SSE-NEXT:    movdqa %xmm9, %xmm2
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,5,6,7,7]
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3]
+; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    por %xmm5, %xmm1
+; SSE-NEXT:    pand %xmm15, %xmm1
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
+; SSE-NEXT:    movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm10[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm15, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm10[0,0,1,1]
+; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm4[1,0,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm3, %xmm4
+; SSE-NEXT:    pandn %xmm2, %xmm4
+; SSE-NEXT:    por %xmm1, %xmm4
+; SSE-NEXT:    pand %xmm15, %xmm4
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm12[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm15, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    por %xmm4, %xmm0
+; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm13, %xmm0
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; SSE-NEXT:    movdqa %xmm0, %xmm9
+; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa %xmm3, %xmm2
 ; SSE-NEXT:    pandn %xmm1, %xmm2
-; SSE-NEXT:    movdqa %xmm8, %xmm4
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,5,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[2,2,2,3]
-; SSE-NEXT:    pand %xmm0, %xmm6
+; SSE-NEXT:    movdqa (%rdx), %xmm8
+; SSE-NEXT:    movdqa (%rcx), %xmm7
+; SSE-NEXT:    movdqa %xmm8, %xmm13
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm7[8],xmm13[9],xmm7[9],xmm13[10],xmm7[10],xmm13[11],xmm7[11],xmm13[12],xmm7[12],xmm13[13],xmm7[13],xmm13[14],xmm7[14],xmm13[15],xmm7[15]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,5,6,7,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[2,2,2,3]
+; SSE-NEXT:    pand %xmm3, %xmm6
 ; SSE-NEXT:    por %xmm2, %xmm6
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT:    pand %xmm2, %xmm6
-; SSE-NEXT:    movdqa %xmm13, %xmm1
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm2, %xmm7
-; SSE-NEXT:    pandn %xmm5, %xmm7
-; SSE-NEXT:    por %xmm6, %xmm7
-; SSE-NEXT:    movdqa {{.*#+}} xmm15 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
+; SSE-NEXT:    movdqa (%rdi), %xmm4
+; SSE-NEXT:    movdqa (%rsi), %xmm10
+; SSE-NEXT:    movdqa %xmm4, %xmm5
+; SSE-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm10[8],xmm5[9],xmm10[9],xmm5[10],xmm10[10],xmm5[11],xmm10[11],xmm5[12],xmm10[12],xmm5[13],xmm10[13],xmm5[14],xmm10[14],xmm5[15],xmm10[15]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm15, %xmm14
+; SSE-NEXT:    pandn %xmm1, %xmm14
+; SSE-NEXT:    pand %xmm15, %xmm6
+; SSE-NEXT:    por %xmm6, %xmm14
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm13[1,0,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm3, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[0,0,1,1]
+; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm9[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm15, %xmm9
+; SSE-NEXT:    pandn %xmm1, %xmm9
+; SSE-NEXT:    pand %xmm15, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm9
+; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT:    punpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
+; SSE-NEXT:    movdqa %xmm3, %xmm1
+; SSE-NEXT:    pandn %xmm0, %xmm1
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,5,6,7,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,2,2,3]
+; SSE-NEXT:    pand %xmm3, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3]
+; SSE-NEXT:    movdqa %xmm15, %xmm6
+; SSE-NEXT:    pandn %xmm1, %xmm6
+; SSE-NEXT:    pand %xmm15, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm6
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm8[1,0,2,2,4,5,6,7]
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; SSE-NEXT:    movdqa %xmm3, %xmm7
+; SSE-NEXT:    pandn %xmm0, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[0,0,1,1]
+; SSE-NEXT:    pand %xmm3, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm15, %xmm1
+; SSE-NEXT:    pandn %xmm0, %xmm1
 ; SSE-NEXT:    pand %xmm15, %xmm7
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm15, %xmm3
-; SSE-NEXT:    pandn %xmm6, %xmm3
-; SSE-NEXT:    por %xmm7, %xmm3
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshuflw {{.*#+}} xmm6 = xmm4[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm0, %xmm7
-; SSE-NEXT:    pandn %xmm6, %xmm7
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm10[1,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm7
-; SSE-NEXT:    movdqa {{.*#+}} xmm12 = [0,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm1[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm12, %xmm6
-; SSE-NEXT:    pandn %xmm9, %xmm6
-; SSE-NEXT:    pand %xmm12, %xmm7
-; SSE-NEXT:    por %xmm7, %xmm6
-; SSE-NEXT:    movdqa {{.*#+}} xmm14 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm14, %xmm3
-; SSE-NEXT:    pandn %xmm7, %xmm3
-; SSE-NEXT:    pand %xmm14, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm3
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[1,0,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
-; SSE-NEXT:    movdqa %xmm2, %xmm6
-; SSE-NEXT:    pandn %xmm4, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm10[0,0,1,1]
-; SSE-NEXT:    pand %xmm2, %xmm4
-; SSE-NEXT:    por %xmm4, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    pandn %xmm1, %xmm4
-; SSE-NEXT:    pand %xmm0, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm4
-; SSE-NEXT:    movdqa {{.*#+}} xmm10 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm10, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm3
-; SSE-NEXT:    pand %xmm10, %xmm4
+; SSE-NEXT:    por %xmm7, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[1,1,2,2]
+; SSE-NEXT:    movdqa %xmm3, %xmm7
+; SSE-NEXT:    pandn %xmm0, %xmm7
+; SSE-NEXT:    pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm0 = mem[1,1,2,2]
+; SSE-NEXT:    pand %xmm3, %xmm0
+; SSE-NEXT:    por %xmm7, %xmm0
+; SSE-NEXT:    pshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm7 = mem[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm15, %xmm11
+; SSE-NEXT:    pandn %xmm7, %xmm11
+; SSE-NEXT:    pand %xmm15, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm11
+; SSE-NEXT:    pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm0 = mem[1,1,2,2]
+; SSE-NEXT:    movdqa %xmm3, %xmm7
+; SSE-NEXT:    pandn %xmm0, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm12[1,1,2,2]
+; SSE-NEXT:    pand %xmm3, %xmm0
+; SSE-NEXT:    por %xmm7, %xmm0
+; SSE-NEXT:    pshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm7 = mem[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm15, %xmm12
+; SSE-NEXT:    pandn %xmm7, %xmm12
+; SSE-NEXT:    pand %xmm15, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm12
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[1,1,2,2]
+; SSE-NEXT:    movdqa %xmm3, %xmm5
+; SSE-NEXT:    pandn %xmm0, %xmm5
+; SSE-NEXT:    pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm0 = mem[1,1,2,2]
+; SSE-NEXT:    pand %xmm3, %xmm0
+; SSE-NEXT:    por %xmm5, %xmm0
+; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm13[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    movdqa %xmm15, %xmm7
+; SSE-NEXT:    pandn %xmm5, %xmm7
+; SSE-NEXT:    pand %xmm15, %xmm0
+; SSE-NEXT:    por %xmm0, %xmm7
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,1,2,2]
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
+; SSE-NEXT:    pand %xmm3, %xmm4
+; SSE-NEXT:    pandn %xmm0, %xmm3
 ; SSE-NEXT:    por %xmm4, %xmm3
-; SSE-NEXT:    movdqa %xmm3, (%rsp) # 16-byte Spill
-; SSE-NEXT:    punpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm11 = xmm11[0],mem[0],xmm11[1],mem[1],xmm11[2],mem[2],xmm11[3],mem[3],xmm11[4],mem[4],xmm11[5],mem[5],xmm11[6],mem[6],xmm11[7],mem[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm11[3,3,3,3]
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    pandn %xmm1, %xmm4
-; SSE-NEXT:    punpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm8 = xmm8[0],mem[0],xmm8[1],mem[1],xmm8[2],mem[2],xmm8[3],mem[3],xmm8[4],mem[4],xmm8[5],mem[5],xmm8[6],mem[6],xmm8[7],mem[7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3]
-; SSE-NEXT:    pand %xmm0, %xmm1
-; SSE-NEXT:    por %xmm4, %xmm1
-; SSE-NEXT:    pand %xmm2, %xmm1
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm13 = xmm13[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm13[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm2, %xmm5
-; SSE-NEXT:    pandn %xmm4, %xmm5
-; SSE-NEXT:    por %xmm1, %xmm5
-; SSE-NEXT:    pand %xmm15, %xmm5
-; SSE-NEXT:    punpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm15, %xmm3
-; SSE-NEXT:    pandn %xmm4, %xmm3
-; SSE-NEXT:    por %xmm5, %xmm3
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm8[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm0, %xmm5
-; SSE-NEXT:    pandn %xmm4, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm11[1,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm4
-; SSE-NEXT:    por %xmm4, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm13[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm12, %xmm6
-; SSE-NEXT:    pandn %xmm4, %xmm6
-; SSE-NEXT:    pand %xmm12, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm14, %xmm3
-; SSE-NEXT:    pandn %xmm4, %xmm3
-; SSE-NEXT:    pand %xmm14, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm3
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm8[1,0,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
-; SSE-NEXT:    movdqa %xmm2, %xmm5
-; SSE-NEXT:    pandn %xmm4, %xmm5
-; SSE-NEXT:    movdqa (%rdi), %xmm7
-; SSE-NEXT:    movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm11[0,0,1,1]
-; SSE-NEXT:    pand %xmm2, %xmm4
-; SSE-NEXT:    por %xmm4, %xmm5
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm13[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm0, %xmm6
-; SSE-NEXT:    pandn %xmm4, %xmm6
-; SSE-NEXT:    movdqa (%rsi), %xmm4
-; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pand %xmm0, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm6
-; SSE-NEXT:    pand %xmm10, %xmm6
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm10, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm3
-; SSE-NEXT:    por %xmm6, %xmm3
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[3,3,3,3]
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    pandn %xmm1, %xmm4
-; SSE-NEXT:    movdqa (%rdx), %xmm13
-; SSE-NEXT:    movdqa (%rcx), %xmm3
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa %xmm13, %xmm1
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm5 = xmm1[0,1,2,3,5,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[2,2,2,3]
-; SSE-NEXT:    pand %xmm0, %xmm6
-; SSE-NEXT:    por %xmm4, %xmm6
-; SSE-NEXT:    movdqa (%r8), %xmm9
-; SSE-NEXT:    movdqa %xmm9, %xmm4
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm11 = xmm4[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm2, %xmm3
-; SSE-NEXT:    pandn %xmm11, %xmm3
-; SSE-NEXT:    pand %xmm2, %xmm6
-; SSE-NEXT:    por %xmm6, %xmm3
-; SSE-NEXT:    movdqa (%r9), %xmm5
-; SSE-NEXT:    movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
-; SSE-NEXT:    pshufd {{.*#+}} xmm11 = xmm6[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm15, %xmm5
-; SSE-NEXT:    pandn %xmm11, %xmm5
 ; SSE-NEXT:    pand %xmm15, %xmm3
-; SSE-NEXT:    por %xmm3, %xmm5
-; SSE-NEXT:    movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm11 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm0, %xmm3
-; SSE-NEXT:    pandn %xmm11, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm7[1,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm5
-; SSE-NEXT:    por %xmm5, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm12, %xmm8
-; SSE-NEXT:    pandn %xmm5, %xmm8
-; SSE-NEXT:    pand %xmm12, %xmm3
-; SSE-NEXT:    por %xmm3, %xmm8
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[1,1,2,2]
-; SSE-NEXT:    movdqa %xmm14, %xmm11
-; SSE-NEXT:    pandn %xmm3, %xmm11
-; SSE-NEXT:    pand %xmm14, %xmm8
-; SSE-NEXT:    por %xmm8, %xmm11
-; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[1,0,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; SSE-NEXT:    movdqa %xmm2, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm7[0,0,1,1]
-; SSE-NEXT:    pand %xmm2, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    pandn %xmm1, %xmm4
-; SSE-NEXT:    pand %xmm0, %xmm3
-; SSE-NEXT:    por %xmm3, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm6[0,0,0,0]
-; SSE-NEXT:    movdqa %xmm10, %xmm7
-; SSE-NEXT:    pandn %xmm1, %xmm7
-; SSE-NEXT:    pand %xmm10, %xmm4
-; SSE-NEXT:    por %xmm4, %xmm7
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT:    punpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3],xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3]
-; SSE-NEXT:    movdqa %xmm0, %xmm3
-; SSE-NEXT:    pandn %xmm1, %xmm3
-; SSE-NEXT:    punpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm13 = xmm13[0],mem[0],xmm13[1],mem[1],xmm13[2],mem[2],xmm13[3],mem[3],xmm13[4],mem[4],xmm13[5],mem[5],xmm13[6],mem[6],xmm13[7],mem[7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm1 = xmm13[0,1,2,3,5,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3]
-; SSE-NEXT:    pand %xmm0, %xmm1
-; SSE-NEXT:    por %xmm3, %xmm1
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm9 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm9[2,2,3,3]
-; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    pandn %xmm3, %xmm4
-; SSE-NEXT:    pand %xmm2, %xmm1
-; SSE-NEXT:    por %xmm1, %xmm4
-; SSE-NEXT:    pand %xmm15, %xmm4
-; SSE-NEXT:    punpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3]
-; SSE-NEXT:    pandn %xmm3, %xmm15
-; SSE-NEXT:    por %xmm4, %xmm15
-; SSE-NEXT:    pshuflw {{.*#+}} xmm3 = xmm13[3,3,3,3,4,5,6,7]
-; SSE-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    pandn %xmm3, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,2,2]
-; SSE-NEXT:    pand %xmm0, %xmm3
-; SSE-NEXT:    por %xmm3, %xmm4
-; SSE-NEXT:    pand %xmm12, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm9[1,1,2,2]
-; SSE-NEXT:    pandn %xmm3, %xmm12
-; SSE-NEXT:    por %xmm4, %xmm12
-; SSE-NEXT:    pand %xmm14, %xmm12
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,2,2]
-; SSE-NEXT:    pandn %xmm3, %xmm14
-; SSE-NEXT:    por %xmm12, %xmm14
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[0,0,1,1]
-; SSE-NEXT:    pand %xmm2, %xmm3
-; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm13[1,0,2,2,4,5,6,7]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
-; SSE-NEXT:    pandn %xmm4, %xmm2
-; SSE-NEXT:    por %xmm3, %xmm2
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm9[0,0,0,0]
-; SSE-NEXT:    pandn %xmm3, %xmm0
-; SSE-NEXT:    por %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm10, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT:    pandn %xmm1, %xmm10
-; SSE-NEXT:    por %xmm0, %xmm10
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm8[3,3,3,3,4,5,6,7]
+; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; SSE-NEXT:    pandn %xmm0, %xmm15
+; SSE-NEXT:    por %xmm3, %xmm15
 ; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT:    movdqa %xmm10, (%rax)
-; SSE-NEXT:    movdqa %xmm14, 16(%rax)
-; SSE-NEXT:    movdqa %xmm15, 32(%rax)
-; SSE-NEXT:    movdqa %xmm7, 48(%rax)
-; SSE-NEXT:    movdqa %xmm11, 64(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 80(%rax)
+; SSE-NEXT:    movdqa %xmm15, 16(%rax)
+; SSE-NEXT:    movdqa %xmm7, 64(%rax)
+; SSE-NEXT:    movdqa %xmm12, 112(%rax)
+; SSE-NEXT:    movdqa %xmm11, 160(%rax)
+; SSE-NEXT:    movdqa %xmm1, (%rax)
+; SSE-NEXT:    movdqa %xmm6, 32(%rax)
+; SSE-NEXT:    movdqa %xmm9, 48(%rax)
+; SSE-NEXT:    movdqa %xmm14, 80(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 96(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 112(%rax)
-; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 128(%rax)
 ; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 144(%rax)
+; SSE-NEXT:    movaps %xmm0, 128(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps %xmm0, 160(%rax)
+; SSE-NEXT:    movaps %xmm0, 144(%rax)
 ; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE-NEXT:    movaps %xmm0, 176(%rax)
 ; SSE-NEXT:    addq $56, %rsp
@@ -1133,501 +1019,413 @@ define void @store_i8_stride6_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr
 ;
 ; AVX1-LABEL: store_i8_stride6_vf32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    pushq %rax
 ; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm13[0,0,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm13[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX1-NEXT:    vandps %ymm0, %ymm9, %ymm0
+; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm1
+; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm2
 ; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[1,0,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vandnps %ymm1, %ymm9, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm14 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX1-NEXT:    vandps %ymm0, %ymm14, %ymm0
+; AVX1-NEXT:    vmovdqa (%r9), %xmm2
+; AVX1-NEXT:    vmovdqa 16(%r9), %xmm4
+; AVX1-NEXT:    vmovdqa (%r8), %xmm7
+; AVX1-NEXT:    vmovdqa 16(%r8), %xmm3
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm8, %ymm5
+; AVX1-NEXT:    vandnps %ymm5, %ymm14, %ymm5
+; AVX1-NEXT:    vorps %ymm5, %ymm0, %ymm0
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3],xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm8[2,2,3,3]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm7[8],xmm2[8],xmm7[9],xmm2[9],xmm7[10],xmm2[10],xmm7[11],xmm2[11],xmm7[12],xmm2[12],xmm7[13],xmm2[13],xmm7[14],xmm2[14],xmm7[15],xmm2[15]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm15[0,0,0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovdqa (%rcx), %xmm5
+; AVX1-NEXT:    vmovdqa (%rdx), %xmm7
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm7[8],xmm5[8],xmm7[9],xmm5[9],xmm7[10],xmm5[10],xmm7[11],xmm5[11],xmm7[12],xmm5[12],xmm7[13],xmm5[13],xmm7[14],xmm5[14],xmm7[15],xmm5[15]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm2[0,1,2,3,5,6,7,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm10[1,0,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm11[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT:    vandnps %ymm0, %ymm9, %ymm0
+; AVX1-NEXT:    vandps %ymm5, %ymm9, %ymm5
+; AVX1-NEXT:    vorps %ymm0, %ymm5, %ymm11
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm7
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm12 = xmm7[0,0,1,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm12, %ymm5, %ymm5
+; AVX1-NEXT:    vandps %ymm14, %ymm11, %ymm11
+; AVX1-NEXT:    vandnps %ymm5, %ymm14, %ymm5
+; AVX1-NEXT:    vorps %ymm5, %ymm11, %ymm12
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[0,0,0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm3
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm11 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm11 = xmm1[8],mem[8],xmm1[9],mem[9],xmm1[10],mem[10],xmm1[11],mem[11],xmm1[12],mem[12],xmm1[13],mem[13],xmm1[14],mem[14],xmm1[15],mem[15]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,5,6,7,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,3]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[1,0,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
+; AVX1-NEXT:    vandnps %ymm3, %ymm9, %ymm3
+; AVX1-NEXT:    vandps %ymm5, %ymm9, %ymm5
+; AVX1-NEXT:    vorps %ymm3, %ymm5, %ymm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm13[3,3,3,3]
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm6 = xmm1[8],mem[8],xmm1[9],mem[9],xmm1[10],mem[10],xmm1[11],mem[11],xmm1[12],mem[12],xmm1[13],mem[13],xmm1[14],mem[14],xmm1[15],mem[15]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[0,0,1,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm5, %ymm1
+; AVX1-NEXT:    vandps %ymm3, %ymm14, %ymm3
+; AVX1-NEXT:    vandnps %ymm1, %ymm14, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm3
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm7 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX1-NEXT:    vandnps %ymm1, %ymm7, %ymm2
-; AVX1-NEXT:    vmovdqa 16(%rcx), %xmm1
-; AVX1-NEXT:    vmovdqa 16(%rdx), %xmm3
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm8[0,1,2,3,5,6,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,2,3]
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[1,0,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vandps %ymm7, %ymm3, %ymm3
-; AVX1-NEXT:    vorps %ymm2, %ymm3, %ymm3
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vmovdqa 16(%r8), %xmm2
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,xmm2[8,u],zero,zero,zero,zero,xmm2[9,u],zero,zero,zero,zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,2,3,4],zero,xmm4[6,7,8,9,10],zero,xmm4[12,13,14,15]
-; AVX1-NEXT:    vmovdqa 16(%r9), %xmm5
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,zero,xmm5[8],zero,zero,zero,zero,zero,xmm5[9],zero,zero,zero,zero
-; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm2[5,u],zero,zero,zero,zero,xmm2[6,u],zero,zero,zero,zero,xmm2[7,u]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6],xmm4[7]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,128,4,5,6,7,8,128,10,11,12,13,14,128]
-; AVX1-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa %xmm4, %xmm9
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm5[5],zero,zero,zero,zero,zero,xmm5[6],zero,zero,zero,zero,zero,xmm5[7]
-; AVX1-NEXT:    vpor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vandps %ymm7, %ymm0, %ymm0
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; AVX1-NEXT:    vandnps %ymm1, %ymm7, %ymm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[1,0,2,2,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vandps %ymm0, %ymm9, %ymm0
+; AVX1-NEXT:    vandnps %ymm1, %ymm9, %ymm1
 ; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm2[13,u],zero,zero,zero,zero,xmm2[14,u],zero,zero,zero,zero,xmm2[15,u]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6],xmm3[7]
-; AVX1-NEXT:    vmovdqa %xmm9, %xmm4
-; AVX1-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm5[13],zero,zero,zero,zero,zero,xmm5[14],zero,zero,zero,zero,zero,xmm5[15]
-; AVX1-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[10,u],zero,zero,zero,zero,xmm2[11,u],zero,zero,zero,zero,xmm2[12,u],zero,zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2,3,4,5,6],zero,xmm0[8,9,10,11,12],zero,xmm0[14,15]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = zero,xmm5[10],zero,zero,zero,zero,zero,xmm5[11],zero,zero,zero,zero,zero,xmm5[12],zero,zero
-; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm10
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm12[1,1,2,2]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm12[3,3,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm8[0,0,0,0]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[1,1,2,2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vandps %ymm0, %ymm14, %ymm0
+; AVX1-NEXT:    vandnps %ymm1, %ymm14, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm7[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[3,3,3,3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vmovdqa (%rcx), %xmm15
-; AVX1-NEXT:    vmovdqa (%rdx), %xmm13
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm13[8],xmm15[8],xmm13[9],xmm15[9],xmm13[10],xmm15[10],xmm13[11],xmm15[11],xmm13[12],xmm15[12],xmm13[13],xmm15[13],xmm13[14],xmm15[14],xmm13[15],xmm15[15]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm15[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm15[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT:    vandnps %ymm0, %ymm9, %ymm0
+; AVX1-NEXT:    vandps %ymm1, %ymm9, %ymm1
+; AVX1-NEXT:    vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm10[3,3,3,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm11[0,1,2,3,5,6,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm14[2,2,2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
-; AVX1-NEXT:    vandps %ymm7, %ymm0, %ymm0
-; AVX1-NEXT:    vandnps %ymm1, %ymm7, %ymm1
-; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm6
-; AVX1-NEXT:    vmovdqa (%r8), %xmm1
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm1[13,u],zero,zero,zero,zero,xmm1[14,u],zero,zero,zero,zero,xmm1[15,u]
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2,3],xmm0[4],xmm7[5,6],xmm0[7]
-; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm7
-; AVX1-NEXT:    vmovdqa (%r9), %xmm0
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm0[13],zero,zero,zero,zero,zero,xmm0[14],zero,zero,zero,zero,zero,xmm0[15]
-; AVX1-NEXT:    vpor %xmm4, %xmm7, %xmm3
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = xmm1[10,u],zero,zero,zero,zero,xmm1[11,u],zero,zero,zero,zero,xmm1[12,u],zero,zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1,2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm14 = [0,128,2,3,4,5,6,128,8,9,10,11,12,128,14,15]
-; AVX1-NEXT:    vpshufb %xmm14, %xmm4, %xmm4
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = zero,xmm0[10],zero,zero,zero,zero,zero,xmm0[11],zero,zero,zero,zero,zero,xmm0[12],zero,zero
-; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm3
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm3[0,0,1,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm6 = xmm3[1,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm4
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm8[1,0,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[3,3,3,3,4,5,6,7]
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm7 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX1-NEXT:    vandps %ymm7, %ymm4, %ymm4
-; AVX1-NEXT:    vandnps %ymm6, %ymm7, %ymm6
-; AVX1-NEXT:    vorps %ymm6, %ymm4, %ymm4
-; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm6
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[2,u],zero,zero,zero,zero,xmm2[3,u],zero,zero,zero,zero,xmm2[4,u],zero,zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1,2],xmm3[3],xmm6[4,5],xmm3[6],xmm6[7]
-; AVX1-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = zero,xmm5[2],zero,zero,zero,zero,zero,xmm5[3],zero,zero,zero,zero,zero,xmm5[4],zero,zero
-; AVX1-NEXT:    vpor %xmm6, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[0,u],zero,zero,zero,zero,xmm2[1,u],zero,zero,zero,zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2],xmm4[3,4],xmm2[5],xmm4[6,7]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,4,128,6,7,8,9,10,128,12,13,14,15]
-; AVX1-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm14 = [128,128,128,128,128,0,128,128,128,128,128,1,128,128,128,128]
-; AVX1-NEXT:    vpshufb %xmm14, %xmm5, %xmm3
-; AVX1-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,0,1,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[1,1,2,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3],xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm3[1,0,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[3,3,3,3,4,5,6,7]
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm10[0,1,2,3,5,6,7,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT:    vandps %ymm0, %ymm14, %ymm0
+; AVX1-NEXT:    vandnps %ymm1, %ymm14, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm6[3,3,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,2,2]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT:    vandnps %ymm1, %ymm9, %ymm1
+; AVX1-NEXT:    vandps %ymm4, %ymm9, %ymm4
+; AVX1-NEXT:    vorps %ymm1, %ymm4, %ymm1
+; AVX1-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm11[3,3,3,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm6, %ymm4
-; AVX1-NEXT:    vandps %ymm7, %ymm2, %ymm2
-; AVX1-NEXT:    vandnps %ymm4, %ymm7, %ymm4
-; AVX1-NEXT:    vorps %ymm4, %ymm2, %ymm4
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[2,u],zero,zero,zero,zero,xmm1[3,u],zero,zero,zero,zero,xmm1[4,u],zero,zero
-; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm6
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1,2],xmm2[3],xmm6[4,5],xmm2[6],xmm6[7]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0],zero,xmm2[2,3,4,5,6],zero,xmm2[8,9,10,11,12],zero,xmm2[14,15]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = zero,xmm0[2],zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,xmm0[4],zero,zero
-; AVX1-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm1[0,u],zero,zero,zero,zero,xmm1[1,u],zero,zero,zero,zero
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
-; AVX1-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
-; AVX1-NEXT:    vpshufb %xmm14, %xmm0, %xmm6
-; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[3,3,3,3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm12[0,0,1,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,7,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,2,3]
-; AVX1-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[1,0,2,2,4,5,6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm6 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX1-NEXT:    vandnps %ymm5, %ymm6, %ymm5
-; AVX1-NEXT:    vandps %ymm6, %ymm3, %ymm3
-; AVX1-NEXT:    vorps %ymm5, %ymm3, %ymm3
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,xmm1[8,u],zero,zero,zero,zero,xmm1[9,u],zero,zero,zero,zero
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm6
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3,4],xmm5[5],xmm6[6,7]
-; AVX1-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,zero,xmm0[8],zero,zero,zero,zero,zero,xmm0[9],zero,zero,zero,zero
-; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[5,u],zero,zero,zero,zero,xmm1[6,u],zero,zero,zero,zero,xmm1[7,u]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6],xmm1[7]
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2],zero,xmm1[4,5,6,7,8],zero,xmm1[10,11,12,13,14],zero
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[7]
-; AVX1-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm11[0,1,2,3,5,6,7,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
+; AVX1-NEXT:    vandps %ymm1, %ymm14, %ymm1
+; AVX1-NEXT:    vandnps %ymm4, %ymm14, %ymm4
+; AVX1-NEXT:    vorps %ymm4, %ymm1, %ymm1
 ; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    vmovdqa %xmm0, 32(%rax)
-; AVX1-NEXT:    vmovdqa %xmm5, 48(%rax)
-; AVX1-NEXT:    vmovdqa %xmm4, (%rax)
-; AVX1-NEXT:    vmovdqa %xmm2, 16(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 96(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 112(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 64(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 80(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 160(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 176(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 128(%rax)
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vmovaps %xmm0, 144(%rax)
-; AVX1-NEXT:    popq %rax
+; AVX1-NEXT:    vmovaps %ymm1, 160(%rax)
+; AVX1-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX1-NEXT:    vmovaps %ymm2, (%rax)
+; AVX1-NEXT:    vmovaps %ymm3, 128(%rax)
+; AVX1-NEXT:    vmovaps %ymm12, 32(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 96(%rax)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: store_i8_stride6_vf32:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    subq $72, %rsp
-; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm0
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm4
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm8
-; AVX2-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm15
-; AVX2-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm7
-; AVX2-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    subq $24, %rsp
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm11
+; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %ymm4
+; AVX2-SLOW-NEXT:    vmovdqa (%r8), %ymm12
+; AVX2-SLOW-NEXT:    vmovdqa (%r9), %ymm9
 ; AVX2-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
 ; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa %xmm2, %xmm6
-; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm12
-; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa %xmm2, %xmm15
+; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
 ; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm11
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm11, %xmm2
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm3
-; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,0,0,1]
+; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm7
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm3
+; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm5
+; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm2
-; AVX2-SLOW-NEXT:    vmovdqa (%r8), %xmm9
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm2, %ymm10, %ymm14
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm15, %ymm10
-; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm8, %ymm2
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm10[0],ymm2[1],ymm10[1],ymm2[2],ymm10[2],ymm2[3],ymm10[3],ymm2[4],ymm10[4],ymm2[5],ymm10[5],ymm2[6],ymm10[6],ymm2[7],ymm10[7],ymm2[16],ymm10[16],ymm2[17],ymm10[17],ymm2[18],ymm10[18],ymm2[19],ymm10[19],ymm2[20],ymm10[20],ymm2[21],ymm10[21],ymm2[22],ymm10[22],ymm2[23],ymm10[23]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %ymm3, %ymm4, %ymm13
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm4, %ymm0
-; AVX2-SLOW-NEXT:    vpshufb %ymm10, %ymm4, %ymm15
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm10, %ymm1, %ymm10
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm15 = ymm10[0],ymm15[0],ymm10[1],ymm15[1],ymm10[2],ymm15[2],ymm10[3],ymm15[3],ymm10[4],ymm15[4],ymm10[5],ymm15[5],ymm10[6],ymm15[6],ymm10[7],ymm15[7],ymm10[16],ymm15[16],ymm10[17],ymm15[17],ymm10[18],ymm15[18],ymm10[19],ymm15[19],ymm10[20],ymm15[20],ymm10[21],ymm15[21],ymm10[22],ymm15[22],ymm10[23],ymm15[23]
-; AVX2-SLOW-NEXT:    vmovdqa (%r9), %ymm10
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm2, %ymm15, %ymm3
-; AVX2-SLOW-NEXT:    vmovdqa (%r9), %xmm8
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm13, %ymm3, %ymm15, %ymm13
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm8[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm14, %ymm3, %ymm2
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm13, %ymm14, %ymm2
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm3, %ymm11, %ymm3
+; AVX2-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm13[0],ymm3[1],ymm13[1],ymm3[2],ymm13[2],ymm3[3],ymm13[3],ymm3[4],ymm13[4],ymm3[5],ymm13[5],ymm3[6],ymm13[6],ymm3[7],ymm13[7],ymm3[16],ymm13[16],ymm3[17],ymm13[17],ymm3[18],ymm13[18],ymm3[19],ymm13[19],ymm3[20],ymm13[20],ymm3[21],ymm13[21],ymm3[22],ymm13[22],ymm3[23],ymm13[23]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %ymm13, %ymm9, %ymm14
+; AVX2-SLOW-NEXT:    vpshufb %ymm13, %ymm12, %ymm13
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[4],ymm14[4],ymm13[5],ymm14[5],ymm13[6],ymm14[6],ymm13[7],ymm14[7],ymm13[16],ymm14[16],ymm13[17],ymm14[17],ymm13[18],ymm14[18],ymm13[19],ymm14[19],ymm13[20],ymm14[20],ymm13[21],ymm14[21],ymm13[22],ymm14[22],ymm13[23],ymm14[23]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm3, %ymm13, %ymm3
+; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,3,2,1,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,5]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm6, %xmm13
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3],xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm3, %ymm4, %ymm14
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, %ymm6
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, %ymm7
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm3, %ymm4, %ymm3
+; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm15
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm8[0],ymm15[0],ymm8[1],ymm15[1],ymm8[2],ymm15[2],ymm8[3],ymm15[3],ymm8[4],ymm15[4],ymm8[5],ymm15[5],ymm8[6],ymm15[6],ymm8[7],ymm15[7],ymm8[16],ymm15[16],ymm8[17],ymm15[17],ymm8[18],ymm15[18],ymm8[19],ymm15[19],ymm8[20],ymm15[20],ymm8[21],ymm15[21],ymm8[22],ymm15[22],ymm8[23],ymm15[23]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[2,2,2,3]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm11[0],ymm0[0],ymm11[1],ymm0[1],ymm11[2],ymm0[2],ymm11[3],ymm0[3],ymm11[4],ymm0[4],ymm11[5],ymm0[5],ymm11[6],ymm0[6],ymm11[7],ymm0[7],ymm11[16],ymm0[16],ymm11[17],ymm0[17],ymm11[18],ymm0[18],ymm11[19],ymm0[19],ymm11[20],ymm0[20],ymm11[21],ymm0[21],ymm11[22],ymm0[22],ymm11[23],ymm0[23]
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, %ymm11
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm2, %ymm4, %ymm1
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm9[2,u,1,u,0,u,3,u,4,u,4,u,4,u,4,u]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm3, %ymm4, %ymm3
+; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm14, %ymm4, %ymm4
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,u,17,u,16,u,19,u,u,u,u,u,20,u,u,u]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm1, %ymm14, %ymm1
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm8[u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm4, %ymm14, %ymm4
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,u,17,u,16,u,19,u,u,u,u,u,20,u,u]
+; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm4, %ymm13, %ymm4
+; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm13 = ymm12[8],ymm9[8],ymm12[9],ymm9[9],ymm12[10],ymm9[10],ymm12[11],ymm9[11],ymm12[12],ymm9[12],ymm12[13],ymm9[13],ymm12[14],ymm9[14],ymm12[15],ymm9[15],ymm12[24],ymm9[24],ymm12[25],ymm9[25],ymm12[26],ymm9[26],ymm12[27],ymm9[27],ymm12[28],ymm9[28],ymm12[29],ymm9[29],ymm12[30],ymm9[30],ymm12[31],ymm9[31]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm14 = ymm8[8],ymm15[8],ymm8[9],ymm15[9],ymm8[10],ymm15[10],ymm8[11],ymm15[11],ymm8[12],ymm15[12],ymm8[13],ymm15[13],ymm8[14],ymm15[14],ymm8[15],ymm15[15],ymm8[24],ymm15[24],ymm8[25],ymm15[25],ymm8[26],ymm15[26],ymm8[27],ymm15[27],ymm8[28],ymm15[28],ymm8[29],ymm15[29],ymm8[30],ymm15[30],ymm8[31],ymm15[31]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm1, %ymm14, %ymm14
-; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm5[8],xmm11[8],xmm5[9],xmm11[9],xmm5[10],xmm11[10],xmm5[11],xmm11[11],xmm5[12],xmm11[12],xmm5[13],xmm11[13],xmm5[14],xmm11[14],xmm5[15],xmm11[15]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm13, %ymm14, %ymm10
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
+; AVX2-SLOW-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm12[8],xmm13[8],xmm12[9],xmm13[9],xmm12[10],xmm13[10],xmm12[11],xmm13[11],xmm12[12],xmm13[12],xmm12[13],xmm13[13],xmm12[14],xmm13[14],xmm12[15],xmm13[15]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %ymm13, %ymm15, %ymm14
+; AVX2-SLOW-NEXT:    vpshufb %ymm13, %ymm8, %ymm8
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm8[0],ymm14[0],ymm8[1],ymm14[1],ymm8[2],ymm14[2],ymm8[3],ymm14[3],ymm8[4],ymm14[4],ymm8[5],ymm14[5],ymm8[6],ymm14[6],ymm8[7],ymm14[7],ymm8[16],ymm14[16],ymm8[17],ymm14[17],ymm8[18],ymm14[18],ymm8[19],ymm14[19],ymm8[20],ymm14[20],ymm8[21],ymm14[21],ymm8[22],ymm14[22],ymm8[23],ymm14[23]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm12[0],ymm9[0],ymm12[1],ymm9[1],ymm12[2],ymm9[2],ymm12[3],ymm9[3],ymm12[4],ymm9[4],ymm12[5],ymm9[5],ymm12[6],ymm9[6],ymm12[7],ymm9[7],ymm12[16],ymm9[16],ymm12[17],ymm9[17],ymm12[18],ymm9[18],ymm12[19],ymm9[19],ymm12[20],ymm9[20],ymm12[21],ymm9[21],ymm12[22],ymm9[22],ymm12[23],ymm9[23]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[2,1,0,3,4,5,6,7,10,9,8,11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm3, %ymm5, %ymm3
+; AVX2-SLOW-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm5 # 16-byte Folded Reload
+; AVX2-SLOW-NEXT:    # xmm5 = xmm6[8],mem[8],xmm6[9],mem[9],xmm6[10],mem[10],xmm6[11],mem[11],xmm6[12],mem[12],xmm6[13],mem[13],xmm6[14],mem[14],xmm6[15],mem[15]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
-; AVX2-SLOW-NEXT:    vpblendvb %ymm11, %ymm1, %ymm5, %ymm1
-; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm6[8],ymm7[8],ymm6[9],ymm7[9],ymm6[10],ymm7[10],ymm6[11],ymm7[11],ymm6[12],ymm7[12],ymm6[13],ymm7[13],ymm6[14],ymm7[14],ymm6[15],ymm7[15],ymm6[24],ymm7[24],ymm6[25],ymm7[25],ymm6[26],ymm7[26],ymm6[27],ymm7[27],ymm6[28],ymm7[28],ymm6[29],ymm7[29],ymm6[30],ymm7[30],ymm6[31],ymm7[31]
-; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm4, %ymm5, %ymm4
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm5[8],ymm11[8],ymm5[9],ymm11[9],ymm5[10],ymm11[10],ymm5[11],ymm11[11],ymm5[12],ymm11[12],ymm5[13],ymm11[13],ymm5[14],ymm11[14],ymm5[15],ymm11[15],ymm5[24],ymm11[24],ymm5[25],ymm11[25],ymm5[26],ymm11[26],ymm5[27],ymm11[27],ymm5[28],ymm11[28],ymm5[29],ymm11[29],ymm5[30],ymm11[30],ymm5[31],ymm11[31]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm11, %ymm5, %ymm7, %ymm5
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm9[10,u,13,u,12,u,11,u,14,u,13,u,14,u,15,u]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm1, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,u,29,u,28,u,27,u,30,u,u,u,u,u,31,u]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm5, %ymm1, %ymm1
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm8[u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm0, %ymm2, %ymm0
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,u,29,u,28,u,27,u,30,u,u,u,u,u,31]
-; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT:    vpblendvb %ymm2, %ymm10, %ymm5, %ymm2
 ; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 160(%rax)
-; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm1, 128(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm14, 96(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 64(%rax)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm4, (%rax)
-; AVX2-SLOW-NEXT:    addq $72, %rsp
+; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 128(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 160(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 64(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 32(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 96(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
+; AVX2-SLOW-NEXT:    addq $24, %rsp
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
 ; AVX2-FAST-LABEL: store_i8_stride6_vf32:
 ; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    subq $40, %rsp
-; AVX2-FAST-NEXT:    vmovaps (%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX2-FAST-NEXT:    vmovdqa (%rdx), %ymm7
-; AVX2-FAST-NEXT:    vmovdqa (%rcx), %ymm8
+; AVX2-FAST-NEXT:    subq $72, %rsp
+; AVX2-FAST-NEXT:    vmovdqa (%rdx), %ymm11
+; AVX2-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa (%rcx), %ymm3
+; AVX2-FAST-NEXT:    vmovdqa (%r8), %ymm8
 ; AVX2-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa (%r8), %ymm4
-; AVX2-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa (%r9), %ymm6
 ; AVX2-FAST-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
 ; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm1
-; AVX2-FAST-NEXT:    vmovdqa %xmm2, %xmm6
-; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm12
-; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
+; AVX2-FAST-NEXT:    vmovdqa %xmm2, %xmm7
+; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm15
+; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm15, %xmm0
 ; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm11
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm11, %xmm2
-; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm3
-; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm2
-; AVX2-FAST-NEXT:    vmovdqa (%r8), %xmm9
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm13, %ymm2, %ymm10, %ymm14
+; AVX2-FAST-NEXT:    vmovdqa (%r9), %xmm14
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm2
+; AVX2-FAST-NEXT:    vmovdqa (%r8), %xmm4
+; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-FAST-NEXT:    vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm10
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm7, %ymm2
-; AVX2-FAST-NEXT:    vmovdqa %ymm7, %ymm1
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm10[0],ymm2[1],ymm10[1],ymm2[2],ymm10[2],ymm2[3],ymm10[3],ymm2[4],ymm10[4],ymm2[5],ymm10[5],ymm2[6],ymm10[6],ymm2[7],ymm10[7],ymm2[16],ymm10[16],ymm2[17],ymm10[17],ymm2[18],ymm10[18],ymm2[19],ymm10[19],ymm2[20],ymm10[20],ymm2[21],ymm10[21],ymm2[22],ymm10[22],ymm2[23],ymm10[23]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
-; AVX2-FAST-NEXT:    vmovdqa %ymm15, %ymm0
-; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm15, %ymm15
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm7, %ymm10
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm15 = ymm10[0],ymm15[0],ymm10[1],ymm15[1],ymm10[2],ymm15[2],ymm10[3],ymm15[3],ymm10[4],ymm15[4],ymm10[5],ymm15[5],ymm10[6],ymm15[6],ymm10[7],ymm15[7],ymm10[16],ymm15[16],ymm10[17],ymm15[17],ymm10[18],ymm15[18],ymm10[19],ymm15[19],ymm10[20],ymm15[20],ymm10[21],ymm15[21],ymm10[22],ymm15[22],ymm10[23],ymm15[23]
-; AVX2-FAST-NEXT:    vmovdqa (%r9), %ymm10
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm12
+; AVX2-FAST-NEXT:    vmovdqa %ymm3, %ymm10
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm2
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm12[0],ymm2[1],ymm12[1],ymm2[2],ymm12[2],ymm2[3],ymm12[3],ymm2[4],ymm12[4],ymm2[5],ymm12[5],ymm2[6],ymm12[6],ymm2[7],ymm12[7],ymm2[16],ymm12[16],ymm2[17],ymm12[17],ymm2[18],ymm12[18],ymm2[19],ymm12[19],ymm2[20],ymm12[20],ymm2[21],ymm12[21],ymm2[22],ymm12[22],ymm2[23],ymm12[23]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm3, %ymm2, %ymm15, %ymm3
-; AVX2-FAST-NEXT:    vmovdqa (%r9), %xmm8
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm13, %ymm3, %ymm15, %ymm13
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm8[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm14, %ymm3, %ymm2
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm13
+; AVX2-FAST-NEXT:    vpshufb %ymm12, %ymm8, %ymm12
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[16],ymm13[16],ymm12[17],ymm13[17],ymm12[18],ymm13[18],ymm12[19],ymm13[19],ymm12[20],ymm13[20],ymm12[21],ymm13[21],ymm12[22],ymm13[22],ymm12[23],ymm13[23]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm9, %ymm2, %ymm12, %ymm2
 ; AVX2-FAST-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm13, %ymm14, %ymm2
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm11[8],xmm5[9],xmm11[9],xmm5[10],xmm11[10],xmm5[11],xmm11[11],xmm5[12],xmm11[12],xmm5[13],xmm11[13],xmm5[14],xmm11[14],xmm5[15],xmm11[15]
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm12[8],xmm6[8],xmm12[9],xmm6[9],xmm12[10],xmm6[10],xmm12[11],xmm6[11],xmm12[12],xmm6[12],xmm12[13],xmm6[13],xmm12[14],xmm6[14],xmm12[15],xmm6[15]
-; AVX2-FAST-NEXT:    vmovdqa %xmm6, %xmm13
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm3, %ymm4, %ymm14
-; AVX2-FAST-NEXT:    vmovdqa %ymm0, %ymm6
-; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm7[8],ymm0[8],ymm7[9],ymm0[9],ymm7[10],ymm0[10],ymm7[11],ymm0[11],ymm7[12],ymm0[12],ymm7[13],ymm0[13],ymm7[14],ymm0[14],ymm7[15],ymm0[15],ymm7[24],ymm0[24],ymm7[25],ymm0[25],ymm7[26],ymm0[26],ymm7[27],ymm0[27],ymm7[28],ymm0[28],ymm7[29],ymm0[29],ymm7[30],ymm0[30],ymm7[31],ymm0[31]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[2,2,2,3]
-; AVX2-FAST-NEXT:    vmovdqa %ymm1, %ymm3
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm2, %ymm4, %ymm1
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm9[10,u,13,u,12,u,11,u,14,u,13,u,14,u,15,u]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm14, %ymm4, %ymm4
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,u,29,u,28,u,27,u,30,u,u,u,u,u,31,u]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm1, %ymm14, %ymm1
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm8[u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm4, %ymm14, %ymm4
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,u,29,u,28,u,27,u,30,u,u,u,u,u,31]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm1, %ymm14, %ymm14
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,3,0,1,6,7,4,5,8,9,8,9,8,9,8,9]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
-; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm1, %ymm5, %ymm1
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[4],ymm6[4],ymm7[5],ymm6[5],ymm7[6],ymm6[6],ymm7[7],ymm6[7],ymm7[16],ymm6[16],ymm7[17],ymm6[17],ymm7[18],ymm6[18],ymm7[19],ymm6[19],ymm7[20],ymm6[20],ymm7[21],ymm6[21],ymm7[22],ymm6[22],ymm7[23],ymm6[23]
-; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,16,17,22,23,20,21,24,25,24,25,24,25,24,25]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm5, %ymm7, %ymm5
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm9[2,u,1,u,0,u,3,u,4,u,4,u,4,u,4,u]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,u,17,u,16,u,19,u,u,u,u,u,20,u,u,u]
-; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm5, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm8[u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4]
+; AVX2-FAST-NEXT:    vpblendvb %ymm9, %ymm2, %ymm3, %ymm2
+; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm2, %ymm0
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,u,17,u,16,u,19,u,u,u,u,u,20,u,u]
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm7[0],xmm15[1],xmm7[1],xmm15[2],xmm7[2],xmm15[3],xmm7[3],xmm15[4],xmm7[4],xmm15[5],xmm7[5],xmm15[6],xmm7[6],xmm15[7],xmm7[7]
+; AVX2-FAST-NEXT:    vmovdqa %xmm7, %xmm11
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,3,0,1,6,7,4,5,8,9,8,9,8,9,8,9]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
+; AVX2-FAST-NEXT:    vpblendvb %ymm9, %ymm2, %ymm3, %ymm2
+; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm13 = ymm7[0],ymm10[0],ymm7[1],ymm10[1],ymm7[2],ymm10[2],ymm7[3],ymm10[3],ymm7[4],ymm10[4],ymm7[5],ymm10[5],ymm7[6],ymm10[6],ymm7[7],ymm10[7],ymm7[16],ymm10[16],ymm7[17],ymm10[17],ymm7[18],ymm10[18],ymm7[19],ymm10[19],ymm7[20],ymm10[20],ymm7[21],ymm10[21],ymm7[22],ymm10[22],ymm7[23],ymm10[23]
+; AVX2-FAST-NEXT:    vmovdqa %ymm10, %ymm8
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,16,17,22,23,20,21,24,25,24,25,24,25,24,25]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm9, %ymm12, %ymm13, %ymm12
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm13 = ymm5[8],ymm6[8],ymm5[9],ymm6[9],ymm5[10],ymm6[10],ymm5[11],ymm6[11],ymm5[12],ymm6[12],ymm5[13],ymm6[13],ymm5[14],ymm6[14],ymm5[15],ymm6[15],ymm5[24],ymm6[24],ymm5[25],ymm6[25],ymm5[26],ymm6[26],ymm5[27],ymm6[27],ymm5[28],ymm6[28],ymm5[29],ymm6[29],ymm5[30],ymm6[30],ymm5[31],ymm6[31]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm9, %ymm13, %ymm10, %ymm9
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
+; AVX2-FAST-NEXT:    vpblendvb %ymm1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm1, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm15[8],xmm11[8],xmm15[9],xmm11[9],xmm15[10],xmm11[10],xmm15[11],xmm11[11],xmm15[12],xmm11[12],xmm15[13],xmm11[13],xmm15[14],xmm11[14],xmm15[15],xmm11[15]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
+; AVX2-FAST-NEXT:    vpblendvb %ymm1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[4,5,2,3,0,1,6,7,8,9,8,9,8,9,8,9]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX2-FAST-NEXT:    vpblendvb %ymm1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,18,19,16,17,22,23,24,25,24,25,24,25,24,25]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm1, %ymm12, %ymm5, %ymm5
+; AVX2-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15],ymm7[24],ymm8[24],ymm7[25],ymm8[25],ymm7[26],ymm8[26],ymm7[27],ymm8[27],ymm7[28],ymm8[28],ymm7[29],ymm8[29],ymm7[30],ymm8[30],ymm7[31],ymm8[31]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm1, %ymm9, %ymm6, %ymm1
 ; AVX2-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-NEXT:    vmovdqa %ymm1, 96(%rax)
-; AVX2-FAST-NEXT:    vmovdqa %ymm14, 160(%rax)
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm1, 128(%rax)
-; AVX2-FAST-NEXT:    vmovdqa %ymm0, (%rax)
-; AVX2-FAST-NEXT:    vmovdqa %ymm4, 64(%rax)
-; AVX2-FAST-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX2-FAST-NEXT:    addq $40, %rsp
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, 128(%rax)
+; AVX2-FAST-NEXT:    vmovdqa %ymm1, 160(%rax)
+; AVX2-FAST-NEXT:    vmovdqa %ymm5, 96(%rax)
+; AVX2-FAST-NEXT:    vmovdqa %ymm4, (%rax)
+; AVX2-FAST-NEXT:    vmovdqa %ymm3, 64(%rax)
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, 32(%rax)
+; AVX2-FAST-NEXT:    addq $72, %rsp
 ; AVX2-FAST-NEXT:    vzeroupper
 ; AVX2-FAST-NEXT:    retq
 ;
 ; AVX512-LABEL: store_i8_stride6_vf32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT:    vmovdqa (%rdi), %ymm12
-; AVX512-NEXT:    vmovdqa (%rsi), %ymm13
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm8
+; AVX512-NEXT:    vmovdqa (%rsi), %ymm9
 ; AVX512-NEXT:    vmovdqa (%rdx), %ymm10
 ; AVX512-NEXT:    vmovdqa (%rcx), %ymm11
-; AVX512-NEXT:    vmovdqa (%r8), %ymm8
-; AVX512-NEXT:    vmovdqa (%r9), %ymm9
-; AVX512-NEXT:    vmovdqa (%rsi), %xmm14
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
-; AVX512-NEXT:    vpshufb %xmm6, %xmm14, %xmm0
+; AVX512-NEXT:    vmovdqa (%r8), %ymm12
+; AVX512-NEXT:    vmovdqa (%r9), %ymm13
+; AVX512-NEXT:    vmovdqa (%rsi), %xmm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
+; AVX512-NEXT:    vpshufb %xmm7, %xmm6, %xmm0
 ; AVX512-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512-NEXT:    vpshufb %xmm6, %xmm1, %xmm6
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; AVX512-NEXT:    vpshufb %xmm7, %xmm1, %xmm7
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; AVX512-NEXT:    vmovdqa (%rcx), %xmm6
+; AVX512-NEXT:    vmovdqa (%rcx), %xmm14
 ; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm2, %xmm6, %xmm3
+; AVX512-NEXT:    vpshufb %xmm2, %xmm14, %xmm3
 ; AVX512-NEXT:    vmovdqa (%rdx), %xmm4
 ; AVX512-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
 ; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
@@ -1635,79 +1433,77 @@ define void @store_i8_stride6_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr
 ; AVX512-NEXT:    movw $18724, %cx # imm = 0x4924
 ; AVX512-NEXT:    kmovd %ecx, %k1
 ; AVX512-NEXT:    vmovdqu16 %ymm0, %ymm2 {%k1}
-; AVX512-NEXT:    vmovdqa (%r9), %xmm3
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm0, %xmm3, %xmm5
+; AVX512-NEXT:    vmovdqa (%r9), %xmm0
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm3, %xmm0, %xmm5
 ; AVX512-NEXT:    vmovdqa (%r8), %xmm7
-; AVX512-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
+; AVX512-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
 ; AVX512-NEXT:    movw $9362, %cx # imm = 0x2492
 ; AVX512-NEXT:    kmovd %ecx, %k2
-; AVX512-NEXT:    vmovdqu16 %ymm0, %ymm2 {%k2}
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; AVX512-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k2}
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512-NEXT:    vpermw %ymm0, %ymm5, %ymm0
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512-NEXT:    vpermw %ymm3, %ymm5, %ymm3
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
 ; AVX512-NEXT:    vprold $16, %xmm5, %xmm5
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
-; AVX512-NEXT:    vmovdqu16 %ymm5, %ymm0 {%k2}
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
+; AVX512-NEXT:    vmovdqu16 %ymm5, %ymm3 {%k2}
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm15 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512-NEXT:    vpermw %ymm5, %ymm15, %ymm0 {%k1}
-; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[16],ymm13[16],ymm12[17],ymm13[17],ymm12[18],ymm13[18],ymm12[19],ymm13[19],ymm12[20],ymm13[20],ymm12[21],ymm13[21],ymm12[22],ymm13[22],ymm12[23],ymm13[23]
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512-NEXT:    vpermw %ymm2, %ymm5, %ymm2
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[16],ymm11[16],ymm10[17],ymm11[17],ymm10[18],ymm11[18],ymm10[19],ymm11[19],ymm10[20],ymm11[20],ymm10[21],ymm11[21],ymm10[22],ymm11[22],ymm10[23],ymm11[23]
-; AVX512-NEXT:    vprold $16, %ymm5, %ymm5
-; AVX512-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX512-NEXT:    vmovdqu16 %ymm5, %ymm2 {%k2}
+; AVX512-NEXT:    vpermw %ymm5, %ymm15, %ymm3 {%k1}
+; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[16],ymm13[16],ymm12[17],ymm13[17],ymm12[18],ymm13[18],ymm12[19],ymm13[19],ymm12[20],ymm13[20],ymm12[21],ymm13[21],ymm12[22],ymm13[22],ymm12[23],ymm13[23]
 ; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[4],ymm9[4],ymm8[5],ymm9[5],ymm8[6],ymm9[6],ymm8[7],ymm9[7],ymm8[16],ymm9[16],ymm8[17],ymm9[17],ymm8[18],ymm9[18],ymm8[19],ymm9[19],ymm8[20],ymm9[20],ymm8[21],ymm9[21],ymm8[22],ymm9[22],ymm8[23],ymm9[23]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm15 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
+; AVX512-NEXT:    vpermw %ymm5, %ymm15, %ymm5
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm15 = [10,9,8,11,10,9,8,11,10,9,8,11,12,12,12,12]
-; AVX512-NEXT:    vpermw %ymm5, %ymm15, %ymm2 {%k1}
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm14[8],xmm1[9],xmm14[9],xmm1[10],xmm14[10],xmm1[11],xmm14[11],xmm1[12],xmm14[12],xmm1[13],xmm14[13],xmm1[14],xmm14[14],xmm1[15],xmm14[15]
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
-; AVX512-NEXT:    vpermw %ymm1, %ymm5, %ymm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512-NEXT:    vpermw %ymm4, %ymm5, %ymm1 {%k1}
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm7[8],xmm3[8],xmm7[9],xmm3[9],xmm7[10],xmm3[10],xmm7[11],xmm3[11],xmm7[12],xmm3[12],xmm7[13],xmm3[13],xmm7[14],xmm3[14],xmm7[15],xmm3[15]
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7]
-; AVX512-NEXT:    movw $-28087, %cx # imm = 0x9249
-; AVX512-NEXT:    kmovd %ecx, %k3
-; AVX512-NEXT:    vpermw %ymm3, %ymm4, %ymm1 {%k3}
-; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm10[8],ymm11[8],ymm10[9],ymm11[9],ymm10[10],ymm11[10],ymm10[11],ymm11[11],ymm10[12],ymm11[12],ymm10[13],ymm11[13],ymm10[14],ymm11[14],ymm10[15],ymm11[15],ymm10[24],ymm11[24],ymm10[25],ymm11[25],ymm10[26],ymm11[26],ymm10[27],ymm11[27],ymm10[28],ymm11[28],ymm10[29],ymm11[29],ymm10[30],ymm11[30],ymm10[31],ymm11[31]
+; AVX512-NEXT:    vpermw %ymm3, %ymm15, %ymm5 {%k1}
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[16],ymm11[16],ymm10[17],ymm11[17],ymm10[18],ymm11[18],ymm10[19],ymm11[19],ymm10[20],ymm11[20],ymm10[21],ymm11[21],ymm10[22],ymm11[22],ymm10[23],ymm11[23]
+; AVX512-NEXT:    vprold $16, %ymm3, %ymm3
+; AVX512-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512-NEXT:    vmovdqu16 %ymm3, %ymm5 {%k2}
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm3 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7]
+; AVX512-NEXT:    vpermw %ymm0, %ymm3, %ymm0
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
+; AVX512-NEXT:    vpermw %ymm1, %ymm3, %ymm0 {%k2}
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm3 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512-NEXT:    vpermw %ymm1, %ymm3, %ymm0 {%k1}
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm10[8],ymm11[8],ymm10[9],ymm11[9],ymm10[10],ymm11[10],ymm10[11],ymm11[11],ymm10[12],ymm11[12],ymm10[13],ymm11[13],ymm10[14],ymm11[14],ymm10[15],ymm11[15],ymm10[24],ymm11[24],ymm10[25],ymm11[25],ymm10[26],ymm11[26],ymm10[27],ymm11[27],ymm10[28],ymm11[28],ymm10[29],ymm11[29],ymm10[30],ymm11[30],ymm10[31],ymm11[31]
 ; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm12[8],ymm13[8],ymm12[9],ymm13[9],ymm12[10],ymm13[10],ymm12[11],ymm13[11],ymm12[12],ymm13[12],ymm12[13],ymm13[13],ymm12[14],ymm13[14],ymm12[15],ymm13[15],ymm12[24],ymm13[24],ymm12[25],ymm13[25],ymm12[26],ymm13[26],ymm12[27],ymm13[27],ymm12[28],ymm13[28],ymm12[29],ymm13[29],ymm12[30],ymm13[30],ymm12[31],ymm13[31]
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
 ; AVX512-NEXT:    vpermw %ymm3, %ymm4, %ymm3
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512-NEXT:    vpermw %ymm2, %ymm4, %ymm3 {%k1}
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm8[8],ymm9[8],ymm8[9],ymm9[9],ymm8[10],ymm9[10],ymm8[11],ymm9[11],ymm8[12],ymm9[12],ymm8[13],ymm9[13],ymm8[14],ymm9[14],ymm8[15],ymm9[15],ymm8[24],ymm9[24],ymm8[25],ymm9[25],ymm8[26],ymm9[26],ymm8[27],ymm9[27],ymm8[28],ymm9[28],ymm8[29],ymm9[29],ymm8[30],ymm9[30],ymm8[31],ymm9[31]
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512-NEXT:    vpermw %ymm2, %ymm4, %ymm3 {%k3}
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %ymm2, %ymm13, %ymm4
-; AVX512-NEXT:    vpshufb %ymm2, %ymm12, %ymm2
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX512-NEXT:    vpermw %ymm1, %ymm4, %ymm3 {%k1}
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm8[8],ymm9[8],ymm8[9],ymm9[9],ymm8[10],ymm9[10],ymm8[11],ymm9[11],ymm8[12],ymm9[12],ymm8[13],ymm9[13],ymm8[14],ymm9[14],ymm8[15],ymm9[15],ymm8[24],ymm9[24],ymm8[25],ymm9[25],ymm8[26],ymm9[26],ymm8[27],ymm9[27],ymm8[28],ymm9[28],ymm8[29],ymm9[29],ymm8[30],ymm9[30],ymm8[31],ymm9[31]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512-NEXT:    vpermw %ymm1, %ymm4, %ymm3 {%k2}
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %ymm1, %ymm13, %ymm4
+; AVX512-NEXT:    vpshufb %ymm1, %ymm12, %ymm1
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[1],ymm4[1],ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[4],ymm4[4],ymm1[5],ymm4[5],ymm1[6],ymm4[6],ymm1[7],ymm4[7],ymm1[16],ymm4[16],ymm1[17],ymm4[17],ymm1[18],ymm4[18],ymm1[19],ymm4[19],ymm1[20],ymm4[20],ymm1[21],ymm4[21],ymm1[22],ymm4[22],ymm1[23],ymm4[23]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
 ; AVX512-NEXT:    vpshufb %ymm4, %ymm11, %ymm5
 ; AVX512-NEXT:    vpshufb %ymm4, %ymm10, %ymm4
 ; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512-NEXT:    vmovdqu16 %ymm2, %ymm4 {%k1}
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %ymm2, %ymm9, %ymm5
-; AVX512-NEXT:    vpshufb %ymm2, %ymm8, %ymm2
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[4],ymm5[4],ymm2[5],ymm5[5],ymm2[6],ymm5[6],ymm2[7],ymm5[7],ymm2[16],ymm5[16],ymm2[17],ymm5[17],ymm2[18],ymm5[18],ymm2[19],ymm5[19],ymm2[20],ymm5[20],ymm2[21],ymm5[21],ymm2[22],ymm5[22],ymm2[23],ymm5[23]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512-NEXT:    vmovdqu16 %ymm2, %ymm4 {%k2}
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm2
-; AVX512-NEXT:    vmovdqu64 %zmm2, 128(%rax)
-; AVX512-NEXT:    vmovdqu64 %zmm1, 64(%rax)
-; AVX512-NEXT:    vmovdqu64 %zmm0, (%rax)
+; AVX512-NEXT:    vmovdqu16 %ymm1, %ymm4 {%k2}
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %ymm1, %ymm9, %ymm5
+; AVX512-NEXT:    vpshufb %ymm1, %ymm8, %ymm1
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm5[0],ymm1[1],ymm5[1],ymm1[2],ymm5[2],ymm1[3],ymm5[3],ymm1[4],ymm5[4],ymm1[5],ymm5[5],ymm1[6],ymm5[6],ymm1[7],ymm5[7],ymm1[16],ymm5[16],ymm1[17],ymm5[17],ymm1[18],ymm5[18],ymm1[19],ymm5[19],ymm1[20],ymm5[20],ymm1[21],ymm5[21],ymm1[22],ymm5[22],ymm1[23],ymm5[23]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX512-NEXT:    vmovdqu16 %ymm1, %ymm4 {%k1}
+; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm1
+; AVX512-NEXT:    vmovdqu64 %zmm1, 128(%rax)
+; AVX512-NEXT:    vmovdqu64 %zmm0, 64(%rax)
+; AVX512-NEXT:    vmovdqu64 %zmm2, (%rax)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <32 x i8>, <32 x i8>* %in.vecptr0, align 32

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
index 9fb4252b3e71d..be65effbc7241 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
@@ -108,12 +108,12 @@ define void @PR46178(i16* %0) {
 ; X86-NEXT:    vmovdqu (%eax), %ymm1
 ; X86-NEXT:    vpmovqw %ymm0, %xmm0
 ; X86-NEXT:    vpmovqw %ymm1, %xmm1
-; X86-NEXT:    vpsllw $8, %xmm0, %xmm0
-; X86-NEXT:    vpsraw $8, %xmm0, %xmm0
 ; X86-NEXT:    vpsllw $8, %xmm1, %xmm1
 ; X86-NEXT:    vpsraw $8, %xmm1, %xmm1
-; X86-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; X86-NEXT:    vmovdqu %ymm0, (%eax)
+; X86-NEXT:    vpsllw $8, %xmm0, %xmm0
+; X86-NEXT:    vpsraw $8, %xmm0, %xmm0
+; X86-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
+; X86-NEXT:    vmovupd %ymm0, (%eax)
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -126,9 +126,8 @@ define void @PR46178(i16* %0) {
 ; X64-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT:    vpsllw $8, %ymm0, %ymm0
 ; X64-NEXT:    vpsraw $8, %ymm0, %ymm0
-; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; X64-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,1]
+; X64-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; X64-NEXT:    vmovdqa %xmm0, %xmm0
 ; X64-NEXT:    vmovdqu %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index 87e5cf95e49cc..148af61e74730 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -3219,74 +3219,106 @@ define void @PR43024() {
 define void @PR45604(<32 x i16>* %dst, <8 x i16>* %src) {
 ; SSE2-LABEL: PR45604:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa (%rsi), %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
-; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535]
-; SSE2-NEXT:    movdqa %xmm2, %xmm3
-; SSE2-NEXT:    pandn %xmm1, %xmm3
-; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [0,0,0,0,11,0,0,0,0,0,0,0,11,0,0,0]
-; SSE2-NEXT:    por %xmm1, %xmm3
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
-; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
-; SSE2-NEXT:    movdqa %xmm2, %xmm5
-; SSE2-NEXT:    pandn %xmm4, %xmm5
-; SSE2-NEXT:    por %xmm1, %xmm5
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,2,2,2]
-; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
-; SSE2-NEXT:    movdqa %xmm2, %xmm6
-; SSE2-NEXT:    pandn %xmm4, %xmm6
-; SSE2-NEXT:    por %xmm1, %xmm6
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
-; SSE2-NEXT:    pandn %xmm0, %xmm2
-; SSE2-NEXT:    por %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, 48(%rdi)
-; SSE2-NEXT:    movdqa %xmm6, 32(%rdi)
-; SSE2-NEXT:    movdqa %xmm5, 16(%rdi)
-; SSE2-NEXT:    movdqa %xmm3, (%rdi)
+; SSE2-NEXT:    movdqa (%rsi), %xmm1
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    movzwl %ax, %eax
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    movl $11, %eax
+; SSE2-NEXT:    pinsrw $2, %eax, %xmm0
+; SSE2-NEXT:    pextrw $1, %xmm1, %ecx
+; SSE2-NEXT:    pinsrw $4, %ecx, %xmm0
+; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
+; SSE2-NEXT:    pextrw $2, %xmm1, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm2
+; SSE2-NEXT:    pinsrw $2, %eax, %xmm2
+; SSE2-NEXT:    pextrw $3, %xmm1, %ecx
+; SSE2-NEXT:    pinsrw $4, %ecx, %xmm2
+; SSE2-NEXT:    pinsrw $6, %eax, %xmm2
+; SSE2-NEXT:    pextrw $4, %xmm1, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm3
+; SSE2-NEXT:    pinsrw $2, %eax, %xmm3
+; SSE2-NEXT:    pextrw $5, %xmm1, %ecx
+; SSE2-NEXT:    pinsrw $4, %ecx, %xmm3
+; SSE2-NEXT:    pinsrw $6, %eax, %xmm3
+; SSE2-NEXT:    pextrw $6, %xmm1, %ecx
+; SSE2-NEXT:    movd %ecx, %xmm4
+; SSE2-NEXT:    pinsrw $2, %eax, %xmm4
+; SSE2-NEXT:    pextrw $7, %xmm1, %ecx
+; SSE2-NEXT:    pinsrw $4, %ecx, %xmm4
+; SSE2-NEXT:    pinsrw $6, %eax, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, 48(%rdi)
+; SSE2-NEXT:    movdqa %xmm3, 32(%rdi)
+; SSE2-NEXT:    movdqa %xmm2, 16(%rdi)
+; SSE2-NEXT:    movdqa %xmm0, (%rdi)
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: PR45604:
 ; SSSE3:       # %bb.0:
-; SSSE3-NEXT:    movdqa (%rsi), %xmm0
-; SSSE3-NEXT:    movdqa %xmm0, %xmm1
-; SSSE3-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,1],zero,zero,zero,zero,zero,zero,xmm1[2,3],zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [0,0,0,0,11,0,0,0,0,0,0,0,11,0,0,0]
-; SSSE3-NEXT:    movdqa %xmm0, %xmm3
-; SSSE3-NEXT:    pshufb {{.*#+}} xmm3 = xmm3[4,5],zero,zero,zero,zero,zero,zero,xmm3[6,7],zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT:    por %xmm2, %xmm1
-; SSSE3-NEXT:    por %xmm2, %xmm3
-; SSSE3-NEXT:    movdqa %xmm0, %xmm4
-; SSSE3-NEXT:    pshufb {{.*#+}} xmm4 = xmm4[8,9],zero,zero,zero,zero,zero,zero,xmm4[10,11],zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT:    por %xmm2, %xmm4
-; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[12,13],zero,zero,zero,zero,zero,zero,xmm0[14,15],zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT:    por %xmm2, %xmm0
-; SSSE3-NEXT:    movdqa %xmm0, 48(%rdi)
-; SSSE3-NEXT:    movdqa %xmm4, 32(%rdi)
-; SSSE3-NEXT:    movdqa %xmm3, 16(%rdi)
-; SSSE3-NEXT:    movdqa %xmm1, (%rdi)
+; SSSE3-NEXT:    movdqa (%rsi), %xmm1
+; SSSE3-NEXT:    movd %xmm1, %eax
+; SSSE3-NEXT:    movzwl %ax, %eax
+; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    movl $11, %eax
+; SSSE3-NEXT:    pinsrw $2, %eax, %xmm0
+; SSSE3-NEXT:    pextrw $1, %xmm1, %ecx
+; SSSE3-NEXT:    pinsrw $4, %ecx, %xmm0
+; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
+; SSSE3-NEXT:    pextrw $2, %xmm1, %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm2
+; SSSE3-NEXT:    pinsrw $2, %eax, %xmm2
+; SSSE3-NEXT:    pextrw $3, %xmm1, %ecx
+; SSSE3-NEXT:    pinsrw $4, %ecx, %xmm2
+; SSSE3-NEXT:    pinsrw $6, %eax, %xmm2
+; SSSE3-NEXT:    pextrw $4, %xmm1, %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm3
+; SSSE3-NEXT:    pinsrw $2, %eax, %xmm3
+; SSSE3-NEXT:    pextrw $5, %xmm1, %ecx
+; SSSE3-NEXT:    pinsrw $4, %ecx, %xmm3
+; SSSE3-NEXT:    pinsrw $6, %eax, %xmm3
+; SSSE3-NEXT:    pextrw $6, %xmm1, %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm4
+; SSSE3-NEXT:    pinsrw $2, %eax, %xmm4
+; SSSE3-NEXT:    pextrw $7, %xmm1, %ecx
+; SSSE3-NEXT:    pinsrw $4, %ecx, %xmm4
+; SSSE3-NEXT:    pinsrw $6, %eax, %xmm4
+; SSSE3-NEXT:    movdqa %xmm4, 48(%rdi)
+; SSSE3-NEXT:    movdqa %xmm3, 32(%rdi)
+; SSSE3-NEXT:    movdqa %xmm2, 16(%rdi)
+; SSSE3-NEXT:    movdqa %xmm0, (%rdi)
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: PR45604:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa (%rsi), %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = <u,0,11,0,u,0,11,0>
-; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
-; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3],xmm3[4],xmm2[5,6,7]
-; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
-; SSE41-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0],xmm2[1,2,3],xmm4[4],xmm2[5,6,7]
-; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
-; SSE41-NEXT:    movdqa %xmm0, (%rdi)
-; SSE41-NEXT:    movdqa %xmm4, 48(%rdi)
-; SSE41-NEXT:    movdqa %xmm3, 32(%rdi)
-; SSE41-NEXT:    movdqa %xmm1, 16(%rdi)
+; SSE41-NEXT:    movdqa (%rsi), %xmm1
+; SSE41-NEXT:    pextrw $2, %xmm1, %eax
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    movl $11, %eax
+; SSE41-NEXT:    pinsrw $2, %eax, %xmm0
+; SSE41-NEXT:    pextrw $3, %xmm1, %ecx
+; SSE41-NEXT:    pinsrw $4, %ecx, %xmm0
+; SSE41-NEXT:    pinsrw $6, %eax, %xmm0
+; SSE41-NEXT:    pextrw $4, %xmm1, %ecx
+; SSE41-NEXT:    movd %ecx, %xmm2
+; SSE41-NEXT:    pinsrw $2, %eax, %xmm2
+; SSE41-NEXT:    pextrw $5, %xmm1, %ecx
+; SSE41-NEXT:    pinsrw $4, %ecx, %xmm2
+; SSE41-NEXT:    pinsrw $6, %eax, %xmm2
+; SSE41-NEXT:    pextrw $6, %xmm1, %ecx
+; SSE41-NEXT:    movd %ecx, %xmm3
+; SSE41-NEXT:    pinsrw $2, %eax, %xmm3
+; SSE41-NEXT:    pextrw $7, %xmm1, %ecx
+; SSE41-NEXT:    pinsrw $4, %ecx, %xmm3
+; SSE41-NEXT:    pinsrw $6, %eax, %xmm3
+; SSE41-NEXT:    pxor %xmm4, %xmm4
+; SSE41-NEXT:    pblendw {{.*#+}} xmm4 = xmm1[0],xmm4[1,2,3,4,5,6,7]
+; SSE41-NEXT:    pinsrw $2, %eax, %xmm4
+; SSE41-NEXT:    pextrw $1, %xmm1, %ecx
+; SSE41-NEXT:    pinsrw $4, %ecx, %xmm4
+; SSE41-NEXT:    pinsrw $6, %eax, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, (%rdi)
+; SSE41-NEXT:    movdqa %xmm3, 48(%rdi)
+; SSE41-NEXT:    movdqa %xmm2, 32(%rdi)
+; SSE41-NEXT:    movdqa %xmm0, 16(%rdi)
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: PR45604:

diff  --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index bae9ab6bfb232..1ee33aea1c614 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -1113,171 +1113,110 @@ ret void
 define void @interleaved_store_vf64_i8_stride3(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <192 x i8>* %p) {
 ; AVX1-LABEL: interleaved_store_vf64_i8_stride3:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $88, %rsp
-; AVX1-NEXT:    .cfi_def_cfa_offset 96
-; AVX1-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovdqa %ymm3, %ymm11
-; AVX1-NEXT:    vmovdqa %ymm2, %ymm12
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm10
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm13
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,128,128,128,6,7,8,9,10>
-; AVX1-NEXT:    vpshufb %xmm5, %xmm13, %xmm8
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,5,6,7,8,9,10,128,128,128,128,128>
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm15
-; AVX1-NEXT:    vpshufb %xmm2, %xmm15, %xmm6
-; AVX1-NEXT:    vpor %xmm6, %xmm8, %xmm3
-; AVX1-NEXT:    vmovdqa %xmm3, (%rsp) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm5, %xmm1, %xmm9
-; AVX1-NEXT:    vpshufb %xmm2, %xmm11, %xmm6
-; AVX1-NEXT:    vpor %xmm6, %xmm9, %xmm3
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm5, %xmm10, %xmm14
-; AVX1-NEXT:    vextractf128 $1, %ymm12, %xmm6
-; AVX1-NEXT:    vpshufb %xmm2, %xmm6, %xmm7
-; AVX1-NEXT:    vpor %xmm7, %xmm14, %xmm3
-; AVX1-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [11,12,13,14,15,0,1,2,3,4,5,128,128,128,128,128]
-; AVX1-NEXT:    vpshufb %xmm7, %xmm10, %xmm10
-; AVX1-NEXT:    vpshufb %xmm7, %xmm1, %xmm3
-; AVX1-NEXT:    vpshufb %xmm7, %xmm13, %xmm13
-; AVX1-NEXT:    vpshufb %xmm7, %xmm0, %xmm4
-; AVX1-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX1-NEXT:    vpshufb %xmm2, %xmm12, %xmm7
-; AVX1-NEXT:    vpor %xmm0, %xmm7, %xmm0
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,4,6,8,10,12,14,7,9,11,13,15>
-; AVX1-NEXT:    vpshufb %xmm0, %xmm7, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
-; AVX1-NEXT:    vpshufb %xmm0, %xmm8, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm8[8],xmm11[8],xmm8[9],xmm11[9],xmm8[10],xmm11[10],xmm8[11],xmm11[11],xmm8[12],xmm11[12],xmm8[13],xmm11[13],xmm8[14],xmm11[14],xmm8[15],xmm11[15]
-; AVX1-NEXT:    vpshufb %xmm0, %xmm9, %xmm9
-; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm1
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm14 = xmm1[8],xmm15[8],xmm1[9],xmm15[9],xmm1[10],xmm15[10],xmm1[11],xmm15[11],xmm1[12],xmm15[12],xmm1[13],xmm15[13],xmm1[14],xmm15[14],xmm1[15],xmm15[15]
-; AVX1-NEXT:    vpshufb %xmm0, %xmm14, %xmm0
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4]
-; AVX1-NEXT:    vpor %xmm5, %xmm13, %xmm5
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4]
-; AVX1-NEXT:    vpor %xmm5, %xmm3, %xmm5
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4]
-; AVX1-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[0,1,2,3,4]
-; AVX1-NEXT:    vpor %xmm5, %xmm10, %xmm5
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm15 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4]
-; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm6
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm14 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4]
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm8 = xmm6[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm9 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm12 = xmm7[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm10
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm13 = xmm10[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm15 = xmm3[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm11
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm7 = xmm11[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm1 = xmm2[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm14 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm6
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
 ; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm13 = xmm9[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
-; AVX1-NEXT:    vpalignr $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm9 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm9 = mem[5,6,7,8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4]
-; AVX1-NEXT:    vpalignr $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm3 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm3 = mem[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm0 = [5,128,11,6,128,12,7,128,13,8,128,14,9,128,15,10]
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm6
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [128,5,128,128,6,128,128,7,128,128,8,128,128,9,128,128]
-; AVX1-NEXT:    vpshufb %xmm4, %xmm2, %xmm5
-; AVX1-NEXT:    vpor %xmm5, %xmm6, %xmm11
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm5
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm1 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm4 = xmm12[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm12 = xmm7[5,6,7,8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm6 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm7 = xmm9[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm9 = xmm15[5,6,7,8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm15 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4]
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm5
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm8[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm8 = xmm13[5,6,7,8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm10 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm8 = xmm8[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm13 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm3 = xmm7[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm5 = xmm9[5,6,7,8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm9 = xmm15[5,6,7,8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm7 = xmm12[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm11 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm2 = xmm14[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm6 = mem[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
+; AVX1-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufb %xmm4, %xmm2, %xmm14
+; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm12
+; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpshufb %xmm4, %xmm7, %xmm7
-; AVX1-NEXT:    vpor %xmm7, %xmm5, %xmm10
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm7
-; AVX1-NEXT:    vpshufb %xmm4, %xmm8, %xmm6
-; AVX1-NEXT:    vpor %xmm6, %xmm7, %xmm12
-; AVX1-NEXT:    vmovdqa (%rsp), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
-; AVX1-NEXT:    vpshufb %xmm1, %xmm14, %xmm4
-; AVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
-; AVX1-NEXT:    vpshufb %xmm1, %xmm15, %xmm7
-; AVX1-NEXT:    vpshufb %xmm1, %xmm9, %xmm2
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm6
-; AVX1-NEXT:    vpshufb %xmm1, %xmm13, %xmm8
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm9
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
-; AVX1-NEXT:    vmovdqu %xmm2, 80(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm10, 64(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm11, 16(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm4, (%rdi)
-; AVX1-NEXT:    vmovdqu %xmm7, 48(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm3, 32(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm1, 176(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm0, 160(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm12, 112(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm6, 96(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm9, 144(%rdi)
-; AVX1-NEXT:    vmovdqu %xmm8, 128(%rdi)
-; AVX1-NEXT:    addq $88, %rsp
-; AVX1-NEXT:    .cfi_def_cfa_offset 8
+; AVX1-NEXT:    vpshufb %xmm4, %xmm11, %xmm1
+; AVX1-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
+; AVX1-NEXT:    vpshufb %xmm4, %xmm3, %xmm11
+; AVX1-NEXT:    vpshufb %xmm4, %xmm9, %xmm9
+; AVX1-NEXT:    vpshufb %xmm4, %xmm10, %xmm2
+; AVX1-NEXT:    vpshufb %xmm4, %xmm8, %xmm3
+; AVX1-NEXT:    vpshufb %xmm4, %xmm13, %xmm4
+; AVX1-NEXT:    vmovdqu %xmm1, 80(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm7, 64(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm6, 16(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm14, (%rdi)
+; AVX1-NEXT:    vmovdqu %xmm0, 48(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm12, 32(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm4, 176(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm3, 160(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm5, 112(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm11, 96(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm2, 144(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm9, 128(%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: interleaved_store_vf64_i8_stride3:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm6 = ymm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
-; AVX2-NEXT:    vpslldq {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[0,1,2,3,4],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[16,17,18,19,20]
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX2-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX2-NEXT:    vpblendvb %ymm8, %ymm6, %ymm7, %ymm6
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm7 = ymm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
-; AVX2-NEXT:    vpslldq {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[0,1,2,3,4],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[16,17,18,19,20]
-; AVX2-NEXT:    vpblendvb %ymm8, %ymm7, %ymm9, %ymm7
-; AVX2-NEXT:    vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10],zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26]
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0]
-; AVX2-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX2-NEXT:    vpblendvb %ymm9, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpslldq {{.*#+}} ymm1 = zero,zero,zero,zero,zero,ymm1[0,1,2,3,4,5,6,7,8,9,10],zero,zero,zero,zero,zero,ymm1[16,17,18,19,20,21,22,23,24,25,26]
-; AVX2-NEXT:    vpblendvb %ymm9, %ymm3, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm4[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,ymm4[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero
-; AVX2-NEXT:    vpblendvb %ymm9, %ymm10, %ymm2, %ymm10
-; AVX2-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm5[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,ymm5[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero
-; AVX2-NEXT:    vpblendvb %ymm9, %ymm11, %ymm3, %ymm9
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm3 = ymm7[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm7[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20]
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm2 = ymm6[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm6[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
-; AVX2-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,ymm1[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero
-; AVX2-NEXT:    vpslldq {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,ymm5[0,1,2,3,4,5,6,7,8,9],zero,zero,zero,zero,zero,zero,ymm5[16,17,18,19,20,21,22,23,24,25]
-; AVX2-NEXT:    vpblendvb %ymm8, %ymm1, %ymm5, %ymm1
-; AVX2-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,ymm0[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero
-; AVX2-NEXT:    vpslldq {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,ymm4[0,1,2,3,4,5,6,7,8,9],zero,zero,zero,zero,zero,zero,ymm4[16,17,18,19,20,21,22,23,24,25]
-; AVX2-NEXT:    vpblendvb %ymm8, %ymm0, %ymm4, %ymm0
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm4 = ymm9[5,6,7,8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4],ymm9[21,22,23,24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20]
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm5 = ymm10[5,6,7,8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4],ymm10[21,22,23,24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm6 = ymm3[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm7 = ymm2[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm8 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm9 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm7[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm7[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm1 = ymm6[5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4],ymm6[21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm4 = ymm4[5,6,7,8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4],ymm4[21,22,23,24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm5 = ymm5[5,6,7,8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4],ymm5[21,22,23,24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm3 = ymm9[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm9[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm2 = ymm8[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm8[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm5 = ymm5[5,6,7,8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4],ymm5[21,22,23,24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm4 = ymm4[5,6,7,8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4],ymm4[21,22,23,24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20]
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm2, %ymm6
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
 ; AVX2-NEXT:    vpshufb %ymm7, %ymm6, %ymm6
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
 ; AVX2-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
 ; AVX2-NEXT:    vpshufb %ymm7, %ymm0, %ymm0
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm3, %ymm5
-; AVX2-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm3, %ymm4
+; AVX2-NEXT:    vpshufb %ymm7, %ymm4, %ymm4
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
 ; AVX2-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm4[2,3]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm5[2,3]
 ; AVX2-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
-; AVX2-NEXT:    vmovdqu %ymm3, 128(%rdi)
-; AVX2-NEXT:    vmovdqu %ymm2, 32(%rdi)
 ; AVX2-NEXT:    vmovdqu %ymm1, 160(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm3, 128(%rdi)
 ; AVX2-NEXT:    vmovdqu %ymm0, 64(%rdi)
-; AVX2-NEXT:    vmovdqu %ymm5, 96(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm2, 32(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm4, 96(%rdi)
 ; AVX2-NEXT:    vmovdqu %ymm6, (%rdi)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1325,125 +1264,79 @@ ret void
 define <64 x i8> @interleaved_load_vf64_i8_stride3(<192 x i8>* %ptr){
 ; AVX1-LABEL: interleaved_load_vf64_i8_stride3:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $40, %rsp
-; AVX1-NEXT:    .cfi_def_cfa_offset 48
-; AVX1-NEXT:    vmovdqu (%rdi), %xmm9
-; AVX1-NEXT:    vmovdqu 16(%rdi), %xmm11
-; AVX1-NEXT:    vmovdqu 48(%rdi), %xmm10
-; AVX1-NEXT:    vmovdqu 64(%rdi), %xmm15
-; AVX1-NEXT:    vmovdqu 80(%rdi), %xmm14
-; AVX1-NEXT:    vmovdqu 96(%rdi), %xmm3
-; AVX1-NEXT:    vmovdqu 112(%rdi), %xmm1
-; AVX1-NEXT:    vmovdqu 144(%rdi), %xmm6
-; AVX1-NEXT:    vmovdqu 160(%rdi), %xmm12
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,0,3,6,9,12,15,2,5,8,11,14]
-; AVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm0
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm2, %xmm6, %xmm13
-; AVX1-NEXT:    vpshufb %xmm2, %xmm9, %xmm5
-; AVX1-NEXT:    vpshufb %xmm2, %xmm10, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm0 = <1,4,7,10,13,128,128,128,128,128,128,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,128,128,128,128,0,3,6,9,12,15,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm8, %xmm1, %xmm7
-; AVX1-NEXT:    vmovdqa %xmm1, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm1, (%rsp) # 16-byte Spill
-; AVX1-NEXT:    vpor %xmm3, %xmm7, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
-; AVX1-NEXT:    vpshufb %xmm8, %xmm12, %xmm7
-; AVX1-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpor %xmm6, %xmm7, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm9, %xmm7
-; AVX1-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm8, %xmm11, %xmm3
-; AVX1-NEXT:    vpor %xmm7, %xmm3, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm10, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm8, %xmm15, %xmm7
-; AVX1-NEXT:    vpor %xmm1, %xmm7, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm15, %xmm7
-; AVX1-NEXT:    vpshufb %xmm8, %xmm14, %xmm6
-; AVX1-NEXT:    vpor %xmm7, %xmm6, %xmm15
-; AVX1-NEXT:    vmovdqu 32(%rdi), %xmm7
-; AVX1-NEXT:    vpshufb %xmm0, %xmm11, %xmm9
-; AVX1-NEXT:    vpshufb %xmm8, %xmm7, %xmm10
-; AVX1-NEXT:    vpor %xmm9, %xmm10, %xmm10
-; AVX1-NEXT:    vmovdqu 176(%rdi), %xmm9
-; AVX1-NEXT:    vpshufb %xmm0, %xmm12, %xmm1
-; AVX1-NEXT:    vpshufb %xmm8, %xmm9, %xmm11
-; AVX1-NEXT:    vpor %xmm1, %xmm11, %xmm11
-; AVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vmovdqu 128(%rdi), %xmm1
-; AVX1-NEXT:    vpshufb %xmm8, %xmm1, %xmm3
-; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm0 = [1,4,7,10,13,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT:    vpshufb %xmm0, %xmm1, %xmm6
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vpor %xmm6, %xmm2, %xmm6
-; AVX1-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm8 = xmm2[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
-; AVX1-NEXT:    vpshufb %xmm0, %xmm9, %xmm6
-; AVX1-NEXT:    vpor %xmm6, %xmm13, %xmm12
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm13 = xmm13[11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7,8,9,10]
-; AVX1-NEXT:    vpshufb %xmm0, %xmm7, %xmm3
-; AVX1-NEXT:    vpor %xmm3, %xmm5, %xmm11
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7,8,9,10]
-; AVX1-NEXT:    vpshufb %xmm0, %xmm14, %xmm0
-; AVX1-NEXT:    vpor %xmm0, %xmm4, %xmm10
-; AVX1-NEXT:    vpalignr {{.*#+}} xmm2 = xmm4[11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7,8,9,10]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,128,128,128,128,128]
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [128,128,128,128,128,128,128,128,128,128,128,2,5,8,11,14]
-; AVX1-NEXT:    vpshufb %xmm6, %xmm14, %xmm4
-; AVX1-NEXT:    vpor %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm6, %xmm7, %xmm4
-; AVX1-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddb %xmm5, %xmm2, %xmm2
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpshufb %xmm6, %xmm9, %xmm5
-; AVX1-NEXT:    vpor %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vpaddb %xmm4, %xmm13, %xmm4
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpaddb %xmm1, %xmm8, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [5,6,7,8,9,10,128,128,128,128,128,0,1,2,3,4]
-; AVX1-NEXT:    vpshufb %xmm3, %xmm10, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128]
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm6, %xmm7, %xmm7
-; AVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; AVX1-NEXT:    vpaddb %xmm0, %xmm5, %xmm0
-; AVX1-NEXT:    vpshufb %xmm3, %xmm11, %xmm5
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm6, %xmm7, %xmm7
-; AVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; AVX1-NEXT:    vpaddb %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vpshufb %xmm3, %xmm12, %xmm5
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm6, %xmm7, %xmm7
-; AVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; AVX1-NEXT:    vpaddb %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vmovdqa (%rsp), %xmm5 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqu (%rdi), %xmm11
+; AVX1-NEXT:    vmovdqu 16(%rdi), %xmm10
+; AVX1-NEXT:    vmovdqu 32(%rdi), %xmm8
+; AVX1-NEXT:    vmovdqu 48(%rdi), %xmm3
+; AVX1-NEXT:    vmovdqu 64(%rdi), %xmm12
+; AVX1-NEXT:    vmovdqu 80(%rdi), %xmm9
+; AVX1-NEXT:    vmovdqu 96(%rdi), %xmm6
+; AVX1-NEXT:    vmovdqu 112(%rdi), %xmm14
+; AVX1-NEXT:    vmovdqu 128(%rdi), %xmm13
+; AVX1-NEXT:    vmovdqu 144(%rdi), %xmm5
+; AVX1-NEXT:    vmovdqu 160(%rdi), %xmm1
+; AVX1-NEXT:    vmovdqu 176(%rdi), %xmm15
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
+; AVX1-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
+; AVX1-NEXT:    vpshufb %xmm4, %xmm11, %xmm11
+; AVX1-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpshufb %xmm4, %xmm10, %xmm10
+; AVX1-NEXT:    vpshufb %xmm4, %xmm12, %xmm12
+; AVX1-NEXT:    vpshufb %xmm4, %xmm14, %xmm14
+; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm4, %xmm13, %xmm2
+; AVX1-NEXT:    vpshufb %xmm4, %xmm15, %xmm0
+; AVX1-NEXT:    vpshufb %xmm4, %xmm8, %xmm7
+; AVX1-NEXT:    vpshufb %xmm4, %xmm9, %xmm4
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm13 = xmm4[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm15 = xmm7[11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm9 = xmm0[11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm8 = xmm2[11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm6 = xmm6[11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm1[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm1
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm3 = xmm3[11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm11 = xmm11[11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm2 = xmm14[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm11, %ymm14
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm4 = xmm12[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm7 = xmm10[11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm10
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm12 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX1-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX1-NEXT:    vandnps %ymm10, %ymm12, %ymm10
+; AVX1-NEXT:    vandps %ymm12, %ymm14, %ymm14
+; AVX1-NEXT:    vorps %ymm10, %ymm14, %ymm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm14
+; AVX1-NEXT:    vandnps %ymm14, %ymm12, %ymm14
+; AVX1-NEXT:    vandps %ymm1, %ymm12, %ymm1
+; AVX1-NEXT:    vorps %ymm1, %ymm14, %ymm1
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm4 = xmm13[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm3 = xmm3[11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm12 = xmm15[11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm7 = xmm11[11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm9[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm9 = xmm8[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm6 = xmm6[11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7,8,9,10]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm2 = xmm5[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
+; AVX1-NEXT:    vpaddb %xmm0, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm0
+; AVX1-NEXT:    vpaddb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm3 = xmm3[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
+; AVX1-NEXT:    vpaddb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpaddb %xmm12, %xmm10, %xmm3
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm4 = xmm7[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
+; AVX1-NEXT:    vpaddb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT:    vpaddb %xmm1, %xmm9, %xmm1
+; AVX1-NEXT:    vpalignr {{.*#+}} xmm3 = xmm6[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
 ; AVX1-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT:    addq $40, %rsp
-; AVX1-NEXT:    .cfi_def_cfa_offset 8
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: interleaved_load_vf64_i8_stride3:
@@ -1454,51 +1347,39 @@ define <64 x i8> @interleaved_load_vf64_i8_stride3(<192 x i8>* %ptr){
 ; AVX2-NEXT:    vmovdqu 96(%rdi), %xmm3
 ; AVX2-NEXT:    vmovdqu 112(%rdi), %xmm4
 ; AVX2-NEXT:    vmovdqu 128(%rdi), %xmm5
-; AVX2-NEXT:    vinserti128 $1, 48(%rdi), %ymm0, %ymm6
-; AVX2-NEXT:    vinserti128 $1, 64(%rdi), %ymm1, %ymm0
+; AVX2-NEXT:    vinserti128 $1, 48(%rdi), %ymm0, %ymm0
+; AVX2-NEXT:    vinserti128 $1, 64(%rdi), %ymm1, %ymm1
 ; AVX2-NEXT:    vinserti128 $1, 80(%rdi), %ymm2, %ymm2
 ; AVX2-NEXT:    vinserti128 $1, 144(%rdi), %ymm3, %ymm3
-; AVX2-NEXT:    vinserti128 $1, 160(%rdi), %ymm4, %ymm1
-; AVX2-NEXT:    vinserti128 $1, 176(%rdi), %ymm5, %ymm4
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
-; AVX2-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX2-NEXT:    vpblendvb %ymm5, %ymm6, %ymm2, %ymm7
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm8 = [1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14]
-; AVX2-NEXT:    vpshufb %ymm8, %ymm7, %ymm7
-; AVX2-NEXT:    vpblendvb %ymm5, %ymm3, %ymm4, %ymm5
-; AVX2-NEXT:    vpshufb %ymm8, %ymm5, %ymm5
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [255,0,0,255,0,0,255,0,0,255,0,0,255,0,0,255,255,0,0,255,0,0,255,0,0,255,0,0,255,0,0,255]
-; AVX2-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX2-NEXT:    vpblendvb %ymm8, %ymm1, %ymm3, %ymm3
-; AVX2-NEXT:    vpblendvb %ymm8, %ymm0, %ymm6, %ymm6
-; AVX2-NEXT:    vpblendvb %ymm8, %ymm2, %ymm0, %ymm9
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm10 = <1,4,7,10,13,0,3,6,9,12,15,u,u,u,u,u,1,4,7,10,13,0,3,6,9,12,15,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %ymm10, %ymm9, %ymm9
-; AVX2-NEXT:    vpblendvb %ymm8, %ymm4, %ymm1, %ymm8
-; AVX2-NEXT:    vpshufb %ymm10, %ymm8, %ymm8
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm8 = ymm5[11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7,8,9,10],ymm5[27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23,24,25,26]
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm9 = ymm7[11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7,8,9,10],ymm7[27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23,24,25,26]
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm10 = [1,4,7,10,13,0,3,6,9,12,15,128,128,128,128,128,17,20,23,26,29,16,19,22,25,28,31,128,128,128,128,128]
-; AVX2-NEXT:    vpshufb %ymm10, %ymm6, %ymm6
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128,128,128,128,128,128,128,18,21,24,27,30]
-; AVX2-NEXT:    vpshufb %ymm11, %ymm2, %ymm2
-; AVX2-NEXT:    vpor %ymm2, %ymm6, %ymm2
-; AVX2-NEXT:    vpaddb %ymm2, %ymm9, %ymm2
-; AVX2-NEXT:    vpshufb %ymm10, %ymm3, %ymm3
-; AVX2-NEXT:    vpshufb %ymm11, %ymm4, %ymm4
-; AVX2-NEXT:    vpor %ymm4, %ymm3, %ymm3
-; AVX2-NEXT:    vpaddb %ymm3, %ymm8, %ymm3
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm4 = ymm7[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,2,5,8,11,14,u,u,u,u,u,u,u,u,u,u,u,2,5,8,11,14,u,u,u,u,u>
+; AVX2-NEXT:    vinserti128 $1, 160(%rdi), %ymm4, %ymm4
+; AVX2-NEXT:    vinserti128 $1, 176(%rdi), %ymm5, %ymm5
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
+; AVX2-NEXT:    vpshufb %ymm6, %ymm3, %ymm3
 ; AVX2-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255]
-; AVX2-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX2-NEXT:    vpblendvb %ymm7, %ymm4, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpalignr {{.*#+}} ymm2 = ymm5[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
 ; AVX2-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
+; AVX2-NEXT:    vpshufb %ymm6, %ymm5, %ymm5
+; AVX2-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm6 = ymm2[11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10],ymm2[27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm7 = ymm5[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm5[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm3 = ymm3[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm1[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm2 = ymm4[11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7,8,9,10],ymm4[27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm4 = ymm7[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm7[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm5 = ymm6[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX2-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX2-NEXT:    vpblendvb %ymm8, %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddb %ymm5, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpaddb %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
+; AVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm1 = ymm3[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT:    vpalignr {{.*#+}} ymm1 = ymm1[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
+; AVX2-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: interleaved_load_vf64_i8_stride3:


        

