[llvm] 248567a - [DAGCombiner] Try to partition ISD::EXTRACT_VECTOR_ELT to accommodate its ISD::BUILD_VECTOR users

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 29 14:27:12 PST 2022


Author: Roman Lebedev
Date: 2022-12-30T01:15:53+03:00
New Revision: 248567a3271961dd5b739890a6bdea22b3829490

URL: https://github.com/llvm/llvm-project/commit/248567a3271961dd5b739890a6bdea22b3829490
DIFF: https://github.com/llvm/llvm-project/commit/248567a3271961dd5b739890a6bdea22b3829490.diff

LOG: [DAGCombiner] Try to partition ISD::EXTRACT_VECTOR_ELT to accommodate its ISD::BUILD_VECTOR users

This mainly cleans up a few patterns that are legalized by scalarizing
a wide-element vector, but whose scalars are then split apart again to
build a vector with narrower elements. In particular this happens in
some cases for illegal ISD::ZERO_EXTEND_VECTOR_INREG.

Given an ISD::EXTRACT_VECTOR_ELT, which is a glorified bit sequence extract,
recursively analyse all of its users, and try to model them as bit sequence
extractions themselves. If all of them agree on the new, narrower element
type, and all of them can be modelled as ISD::EXTRACT_VECTOR_ELTs of that
new element type, do that, but only if all unmodelled users are ISD::BUILD_VECTORs.
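
As a rough illustration (not part of this patch, and using made-up values: a
hypothetical v2i64 source whose extracted i64 lane is split into i8 leaves),
the following standalone C++ sketch models the bit-position bookkeeping the
combine performs: the root extract covers a bit range of the source vector,
each SRL-by-constant user moves the start of that range up, each TRUNCATE
narrows it, and every leaf that ends up exactly one narrow element wide and
aligned to it maps onto an ISD::EXTRACT_VECTOR_ELT of the bitcast vector.

// Standalone model of the bookkeeping only; this is not LLVM code.
#include <cassert>
#include <cstdio>
#include <vector>

struct Entry {
  unsigned BitPos;  // first bit of the wide vector covered by this value
  unsigned NumBits; // number of covered bits
};

int main() {
  const unsigned VecBits = 128; // hypothetical v2i64 source vector
  const unsigned WideElt = 64;  // original (wide) element width
  const unsigned Index = 1;     // extract element #1

  // The root ISD::EXTRACT_VECTOR_ELT covers bits [64, 128) of the vector.
  Entry Root = {WideElt * Index, WideElt};

  // Users of the root, modelled as SRL by a constant followed by TRUNCATE.
  struct User { unsigned ShAmt, TruncBits; };
  std::vector<User> Users = {{0, 8}, {8, 8}, {16, 8}, {24, 8}};

  std::vector<Entry> Leafs;
  for (User U : Users) {
    // SRL moves the start of the covered range up by the shift amount;
    // TRUNCATE keeps the start but narrows the range to the leaf's width.
    Leafs.push_back({Root.BitPos + U.ShAmt, U.TruncBits});
  }

  unsigned NewElt = Leafs.front().NumBits; // candidate narrow element width
  for (const Entry &E : Leafs) {
    assert(E.NumBits == NewElt && E.BitPos % NewElt == 0 &&
           "all leaves must agree on the narrow width and be aligned to it");
    // Each leaf becomes an extract of one element of the bitcast vector.
    std::printf("bits [%u,%u) -> element %u of the v%ui%u bitcast\n",
                E.BitPos, E.BitPos + E.NumBits, E.BitPos / NewElt,
                VecBits / NewElt, NewElt);
  }
  return 0;
}

The element indices this prints (8..11 of a v16i8 in this example) correspond
to what the real combine feeds into DAG.getVectorIdxConstant() when it
rebuilds the leaves.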

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index fd27220f48002..48e4f24a4e220 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -500,6 +500,8 @@ namespace {
     SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain);
     SDValue replaceStoreOfFPConstant(StoreSDNode *ST);
 
+    bool refineExtractVectorEltIntoMultipleNarrowExtractVectorElts(SDNode *N);
+
     SDValue visitSTORE(SDNode *N);
     SDValue visitLIFETIME_END(SDNode *N);
     SDValue visitINSERT_VECTOR_ELT(SDNode *N);
@@ -20256,6 +20258,168 @@ static SDValue scalarizeExtractedBinop(SDNode *ExtElt, SelectionDAG &DAG,
   return SDValue();
 }
 
+// Given an ISD::EXTRACT_VECTOR_ELT, which is a glorified bit sequence
+// extract, recursively analyse all of its users, and try to model them as
+// bit sequence extractions themselves. If all of them agree on the new,
+// narrower element type, and all of them can be modelled as
+// ISD::EXTRACT_VECTOR_ELTs of that new element type, do so now.
+// This is mainly useful to recover from legalization that scalarized
+// the vector as wide elements but then rebuilds it with narrower elements.
+//
+// Some more nodes could be modelled if that helps cover interesting patterns.
+bool DAGCombiner::refineExtractVectorEltIntoMultipleNarrowExtractVectorElts(
+    SDNode *N) {
+  // We perform this optimization post type-legalization because
+  // the type-legalizer often scalarizes integer-promoted vectors.
+  // Performing this optimization earlier may cause legalization cycles.
+  if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
+    return false;
+
+  // TODO: Add support for big-endian.
+  if (DAG.getDataLayout().isBigEndian())
+    return false;
+
+  SDValue VecOp = N->getOperand(0);
+  EVT VecVT = VecOp.getValueType();
+  assert(!VecVT.isScalableVector() && "Only for fixed vectors.");
+
+  // We must start with a constant extraction index.
+  auto *IndexC = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  if (!IndexC)
+    return false;
+
+  assert(IndexC->getZExtValue() < VecVT.getVectorNumElements() &&
+         "Original ISD::EXTRACT_VECTOR_ELT is undefinend?");
+
+  // TODO: deal with the case of implicit anyext of the extraction.
+  unsigned VecEltBitWidth = VecVT.getScalarSizeInBits();
+  EVT ScalarVT = N->getValueType(0);
+  if (VecVT.getScalarType() != ScalarVT)
+    return false;
+
+  // TODO: deal with the cases other than everything being integer-typed.
+  if (!ScalarVT.isScalarInteger())
+    return false;
+
+  struct Entry {
+    SDNode *Producer;
+
+    // Which bits of VecOp does it contain?
+    unsigned BitPos;
+    int NumBits;
+    // NOTE: the actual width of \p Producer may be wider than NumBits!
+
+    Entry(Entry &&) = default;
+    Entry(SDNode *Producer_, unsigned BitPos_, int NumBits_)
+        : Producer(Producer_), BitPos(BitPos_), NumBits(NumBits_) {}
+
+    Entry() = delete;
+    Entry(const Entry &) = delete;
+    Entry &operator=(const Entry &) = delete;
+    Entry &operator=(Entry &&) = delete;
+  };
+  SmallVector<Entry, 32> Worklist;
+  SmallVector<Entry, 32> Leafs;
+
+  // We start at the "root" ISD::EXTRACT_VECTOR_ELT.
+  Worklist.emplace_back(N, /*BitPos=*/VecEltBitWidth * IndexC->getZExtValue(),
+                        /*NumBits=*/VecEltBitWidth);
+
+  while (!Worklist.empty()) {
+    Entry E = Worklist.pop_back_val();
+    // Bail if the node uses no VecOp bits, or bits outside of VecOp.
+    if (!(E.NumBits > 0 && E.BitPos < VecVT.getSizeInBits() &&
+          E.BitPos + E.NumBits <= VecVT.getSizeInBits()))
+      return false; // Let the other combines clean this up first.
+    // Did we fail to model any of the users of the Producer?
+    bool ProducerIsLeaf = false;
+    // Look at each user of this Producer.
+    for (SDNode *User : E.Producer->uses()) {
+      switch (User->getOpcode()) {
+      // TODO: support ISD::BITCAST
+      // TODO: support ISD::ANY_EXTEND
+      // TODO: support ISD::ZERO_EXTEND
+      // TODO: support ISD::SIGN_EXTEND
+      case ISD::TRUNCATE:
+        // Truncation means we keep the position, but extract fewer bits.
+        Worklist.emplace_back(User, E.BitPos,
+                              /*NumBits=*/User->getValueSizeInBits(0));
+        break;
+      // TODO: support ISD::SRA
+      // TODO: support ISD::SHL
+      case ISD::SRL:
+        // We should be shifting the Producer by a constant amount.
+        if (auto *ShAmtC = dyn_cast<ConstantSDNode>(User->getOperand(1));
+            User->getOperand(0).getNode() == E.Producer && ShAmtC) {
+          // Logical right-shift means that we start extraction later,
+          // but stop it at the same position we did previously.
+          unsigned ShAmt = ShAmtC->getZExtValue();
+          Worklist.emplace_back(User, E.BitPos + ShAmt, E.NumBits - ShAmt);
+          break;
+        }
+        [[fallthrough]];
+      default:
+        // We cannot model this user of the Producer, which means
+        // the current Producer will remain an ISD::EXTRACT_VECTOR_ELT.
+        ProducerIsLeaf = true;
+        // Profitability check: all users that we cannot model
+        //                      must be ISD::BUILD_VECTOR's.
+        if (User->getOpcode() != ISD::BUILD_VECTOR)
+          return false;
+        break;
+      }
+    }
+    if (ProducerIsLeaf)
+      Leafs.emplace_back(std::move(E));
+  }
+
+  unsigned NewVecEltBitWidth = Leafs.front().NumBits;
+
+  // If we are still at the same element granularity, give up.
+  if (NewVecEltBitWidth == VecEltBitWidth)
+    return false;
+
+  // The vector width must be a multiple of the new element width.
+  if (VecVT.getSizeInBits() % NewVecEltBitWidth != 0)
+    return false;
+
+  // All leafs must agree on the new element width.
+  // All leafs must not expect any "padding" bits on top of that width.
+  // All leafs must start extraction from multiple of that width.
+  if (!all_of(Leafs, [NewVecEltBitWidth](const Entry &E) {
+        return (unsigned)E.NumBits == NewVecEltBitWidth &&
+               E.Producer->getValueSizeInBits(0) == NewVecEltBitWidth &&
+               E.BitPos % NewVecEltBitWidth == 0;
+      }))
+    return false;
+
+  EVT NewScalarVT = EVT::getIntegerVT(*DAG.getContext(), NewVecEltBitWidth);
+  EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewScalarVT,
+                                  VecVT.getSizeInBits() / NewVecEltBitWidth);
+
+  if (LegalTypes &&
+      !(TLI.isTypeLegal(NewScalarVT) && TLI.isTypeLegal(NewVecVT)))
+    return false;
+
+  if (LegalOperations &&
+      !(TLI.isOperationLegalOrCustom(ISD::BITCAST, NewVecVT) &&
+        TLI.isOperationLegalOrCustom(ISD::EXTRACT_VECTOR_ELT, NewVecVT)))
+    return false;
+
+  SDValue NewVecOp = DAG.getBitcast(NewVecVT, VecOp);
+  for (const Entry &E : Leafs) {
+    SDLoc DL(E.Producer);
+    unsigned NewIndex = E.BitPos / NewVecEltBitWidth;
+    assert(NewIndex < NewVecVT.getVectorNumElements() &&
+           "Creating out-of-bounds ISD::EXTRACT_VECTOR_ELT?");
+    SDValue V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, NewScalarVT, NewVecOp,
+                            DAG.getVectorIdxConstant(NewIndex, DL));
+    CombineTo(E.Producer, V);
+  }
+
+  return true;
+}
+
 SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
   SDValue VecOp = N->getOperand(0);
   SDValue Index = N->getOperand(1);
@@ -20450,6 +20614,9 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
     }
   }
 
+  if (refineExtractVectorEltIntoMultipleNarrowExtractVectorElts(N))
+    return SDValue(N, 0);
+
   // Everything under here is trying to match an extract of a loaded value.
   // If the result of load has to be truncated, then it's not necessarily
   // profitable.

diff  --git a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
index 2875b03fe1c29..4d6d5edd16510 100644
--- a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
+++ b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -978,201 +978,59 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
 ; SSE42-NEXT:    popq %rbx
 ; SSE42-NEXT:    retq
 ;
-; AVX1-LABEL: _clearupper32xi8b:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    movq %rax, %rdx
-; AVX1-NEXT:    movq %rax, %rsi
-; AVX1-NEXT:    movq %rax, %rdi
-; AVX1-NEXT:    shrq $32, %rdi
-; AVX1-NEXT:    andl $15, %edi
-; AVX1-NEXT:    shlq $32, %rdi
-; AVX1-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; AVX1-NEXT:    orq %rdi, %rax
-; AVX1-NEXT:    movq -{{[0-9]+}}(%rsp), %rdi
-; AVX1-NEXT:    shrq $40, %rsi
-; AVX1-NEXT:    andl $15, %esi
-; AVX1-NEXT:    shlq $40, %rsi
-; AVX1-NEXT:    orq %rax, %rsi
-; AVX1-NEXT:    movq %rdi, %rax
-; AVX1-NEXT:    shrq $48, %rdx
-; AVX1-NEXT:    andl $15, %edx
-; AVX1-NEXT:    shlq $48, %rdx
-; AVX1-NEXT:    orq %rsi, %rdx
-; AVX1-NEXT:    movq %rdi, %rsi
-; AVX1-NEXT:    shrq $56, %rcx
-; AVX1-NEXT:    andl $15, %ecx
-; AVX1-NEXT:    shlq $56, %rcx
-; AVX1-NEXT:    orq %rdx, %rcx
-; AVX1-NEXT:    movq %rdi, %rdx
-; AVX1-NEXT:    movq %rcx, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    movq %rdi, %rcx
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    andl $15, %ecx
-; AVX1-NEXT:    shlq $32, %rcx
-; AVX1-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; AVX1-NEXT:    orq %rcx, %rdi
-; AVX1-NEXT:    shrq $40, %rdx
-; AVX1-NEXT:    andl $15, %edx
-; AVX1-NEXT:    shlq $40, %rdx
-; AVX1-NEXT:    orq %rdi, %rdx
-; AVX1-NEXT:    shrq $48, %rsi
-; AVX1-NEXT:    andl $15, %esi
-; AVX1-NEXT:    shlq $48, %rsi
-; AVX1-NEXT:    orq %rdx, %rsi
-; AVX1-NEXT:    shrq $56, %rax
-; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    shlq $56, %rax
-; AVX1-NEXT:    orq %rsi, %rax
-; AVX1-NEXT:    vmovq %xmm0, %rcx
-; AVX1-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    movl %ecx, %eax
-; AVX1-NEXT:    shrl $8, %eax
-; AVX1-NEXT:    vmovd %ecx, %xmm1
-; AVX1-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    movl %ecx, %eax
-; AVX1-NEXT:    shrl $16, %eax
-; AVX1-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    movl %ecx, %eax
-; AVX1-NEXT:    shrl $24, %eax
-; AVX1-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    movq %rcx, %rax
-; AVX1-NEXT:    shrq $32, %rax
-; AVX1-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    movq %rcx, %rax
-; AVX1-NEXT:    shrq $40, %rax
-; AVX1-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    movq %rcx, %rax
-; AVX1-NEXT:    shrq $48, %rax
-; AVX1-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    shrq $56, %rcx
-; AVX1-NEXT:    vpinsrb $7, %ecx, %xmm1, %xmm0
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $8, %ecx
-; AVX1-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $16, %ecx
-; AVX1-NEXT:    vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $24, %ecx
-; AVX1-NEXT:    vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shrq $40, %rcx
-; AVX1-NEXT:    vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    shrq $48, %rcx
-; AVX1-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    shrq $56, %rax
-; AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: _clearupper32xi8b:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    movq %rax, %rdx
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    movq %rax, %rdi
-; AVX2-NEXT:    shrq $32, %rdi
-; AVX2-NEXT:    andl $15, %edi
-; AVX2-NEXT:    shlq $32, %rdi
-; AVX2-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; AVX2-NEXT:    orq %rdi, %rax
-; AVX2-NEXT:    movq -{{[0-9]+}}(%rsp), %rdi
-; AVX2-NEXT:    shrq $40, %rsi
-; AVX2-NEXT:    andl $15, %esi
-; AVX2-NEXT:    shlq $40, %rsi
-; AVX2-NEXT:    orq %rax, %rsi
-; AVX2-NEXT:    movq %rdi, %rax
-; AVX2-NEXT:    shrq $48, %rdx
-; AVX2-NEXT:    andl $15, %edx
-; AVX2-NEXT:    shlq $48, %rdx
-; AVX2-NEXT:    orq %rsi, %rdx
-; AVX2-NEXT:    movq %rdi, %rsi
-; AVX2-NEXT:    shrq $56, %rcx
-; AVX2-NEXT:    andl $15, %ecx
-; AVX2-NEXT:    shlq $56, %rcx
-; AVX2-NEXT:    orq %rdx, %rcx
-; AVX2-NEXT:    movq %rdi, %rdx
-; AVX2-NEXT:    movq %rcx, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    movq %rdi, %rcx
-; AVX2-NEXT:    shrq $32, %rcx
-; AVX2-NEXT:    andl $15, %ecx
-; AVX2-NEXT:    shlq $32, %rcx
-; AVX2-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; AVX2-NEXT:    orq %rcx, %rdi
-; AVX2-NEXT:    shrq $40, %rdx
-; AVX2-NEXT:    andl $15, %edx
-; AVX2-NEXT:    shlq $40, %rdx
-; AVX2-NEXT:    orq %rdi, %rdx
-; AVX2-NEXT:    shrq $48, %rsi
-; AVX2-NEXT:    andl $15, %esi
-; AVX2-NEXT:    shlq $48, %rsi
-; AVX2-NEXT:    orq %rdx, %rsi
-; AVX2-NEXT:    shrq $56, %rax
-; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT:    shlq $56, %rax
-; AVX2-NEXT:    orq %rsi, %rax
-; AVX2-NEXT:    vmovq %xmm0, %rcx
-; AVX2-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    movl %ecx, %eax
-; AVX2-NEXT:    shrl $8, %eax
-; AVX2-NEXT:    vmovd %ecx, %xmm1
-; AVX2-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    movl %ecx, %eax
-; AVX2-NEXT:    shrl $16, %eax
-; AVX2-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    movl %ecx, %eax
-; AVX2-NEXT:    shrl $24, %eax
-; AVX2-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    movq %rcx, %rax
-; AVX2-NEXT:    shrq $32, %rax
-; AVX2-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    movq %rcx, %rax
-; AVX2-NEXT:    shrq $40, %rax
-; AVX2-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    movq %rcx, %rax
-; AVX2-NEXT:    shrq $48, %rax
-; AVX2-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    shrq $56, %rcx
-; AVX2-NEXT:    vpinsrb $7, %ecx, %xmm1, %xmm0
-; AVX2-NEXT:    movl %eax, %ecx
-; AVX2-NEXT:    shrl $8, %ecx
-; AVX2-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %ecx
-; AVX2-NEXT:    shrl $16, %ecx
-; AVX2-NEXT:    vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %ecx
-; AVX2-NEXT:    shrl $24, %ecx
-; AVX2-NEXT:    vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shrq $32, %rcx
-; AVX2-NEXT:    vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shrq $40, %rcx
-; AVX2-NEXT:    vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    shrq $48, %rcx
-; AVX2-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    shrq $56, %rax
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm1
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: _clearupper32xi8b:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rdi
+; AVX-NEXT:    movq %rdx, %rax
+; AVX-NEXT:    shrq $56, %rax
+; AVX-NEXT:    andl $15, %eax
+; AVX-NEXT:    movq %rdx, %rcx
+; AVX-NEXT:    shrq $48, %rcx
+; AVX-NEXT:    andl $15, %ecx
+; AVX-NEXT:    movq %rdx, %rsi
+; AVX-NEXT:    shrq $40, %rsi
+; AVX-NEXT:    andl $15, %esi
+; AVX-NEXT:    movq %rdx, %r8
+; AVX-NEXT:    shrq $32, %r8
+; AVX-NEXT:    andl $15, %r8d
+; AVX-NEXT:    movq %rdi, %r9
+; AVX-NEXT:    shrq $56, %r9
+; AVX-NEXT:    andl $15, %r9d
+; AVX-NEXT:    movq %rdi, %r10
+; AVX-NEXT:    shrq $48, %r10
+; AVX-NEXT:    andl $15, %r10d
+; AVX-NEXT:    movq %rdi, %r11
+; AVX-NEXT:    shrq $40, %r11
+; AVX-NEXT:    andl $15, %r11d
+; AVX-NEXT:    movq %rdi, %rbx
+; AVX-NEXT:    shrq $32, %rbx
+; AVX-NEXT:    andl $15, %ebx
+; AVX-NEXT:    shlq $32, %rbx
+; AVX-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
+; AVX-NEXT:    orq %rbx, %rdi
+; AVX-NEXT:    shlq $40, %r11
+; AVX-NEXT:    orq %rdi, %r11
+; AVX-NEXT:    shlq $48, %r10
+; AVX-NEXT:    orq %r11, %r10
+; AVX-NEXT:    shlq $56, %r9
+; AVX-NEXT:    orq %r10, %r9
+; AVX-NEXT:    movq %r9, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    shlq $32, %r8
+; AVX-NEXT:    andl $252645135, %edx # imm = 0xF0F0F0F
+; AVX-NEXT:    orq %r8, %rdx
+; AVX-NEXT:    shlq $40, %rsi
+; AVX-NEXT:    orq %rdx, %rsi
+; AVX-NEXT:    shlq $48, %rcx
+; AVX-NEXT:    orq %rsi, %rcx
+; AVX-NEXT:    shlq $56, %rax
+; AVX-NEXT:    orq %rcx, %rax
+; AVX-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    vinsertf128 $0, -{{[0-9]+}}(%rsp), %ymm0, %ymm0
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    retq
   %x4  = bitcast <32 x i8> %0 to <64 x i4>
   %r0  = insertelement <64 x i4> %x4,  i4 zeroinitializer, i32 1
   %r1  = insertelement <64 x i4> %r0,  i4 zeroinitializer, i32 3

diff  --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
index ac5f943cc14fe..e7ea3756b8f16 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
@@ -4964,50 +4964,6 @@ define void @vec384_v3i128_to_v1i384_factor3(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX-NEXT:    vmovq %xmm0, %rax
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    movq %rax, %rdi
-; AVX-NEXT:    movq %rax, %r8
-; AVX-NEXT:    movq %rax, %r9
-; AVX-NEXT:    movl %eax, %r10d
-; AVX-NEXT:    movl %eax, %r11d
-; AVX-NEXT:    vmovd %eax, %xmm1
-; AVX-NEXT:    shrl $8, %eax
-; AVX-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    shrl $16, %r11d
-; AVX-NEXT:    vpinsrb $2, %r11d, %xmm1, %xmm1
-; AVX-NEXT:    shrl $24, %r10d
-; AVX-NEXT:    vpinsrb $3, %r10d, %xmm1, %xmm1
-; AVX-NEXT:    shrq $32, %r9
-; AVX-NEXT:    vpinsrb $4, %r9d, %xmm1, %xmm1
-; AVX-NEXT:    shrq $40, %r8
-; AVX-NEXT:    vpinsrb $5, %r8d, %xmm1, %xmm1
-; AVX-NEXT:    shrq $48, %rdi
-; AVX-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    shrq $56, %rsi
-; AVX-NEXT:    vpinsrb $7, %esi, %xmm1, %xmm0
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $8, %esi
-; AVX-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $9, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $16, %esi
-; AVX-NEXT:    vpinsrb $10, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $24, %esi
-; AVX-NEXT:    vpinsrb $11, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $32, %rsi
-; AVX-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $40, %rsi
-; AVX-NEXT:    vpinsrb $13, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $48, %rsi
-; AVX-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0
-; AVX-NEXT:    shrq $56, %rax
-; AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
 ; AVX-NEXT:    vmovaps 32(%rdx), %ymm1
 ; AVX-NEXT:    vpaddb (%rdx), %xmm0, %xmm0
 ; AVX-NEXT:    vmovaps 16(%rdx), %xmm2
@@ -5021,50 +4977,6 @@ define void @vec384_v3i128_to_v1i384_factor3(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    movq %rax, %rdi
-; AVX2-NEXT:    movq %rax, %r8
-; AVX2-NEXT:    movq %rax, %r9
-; AVX2-NEXT:    movl %eax, %r10d
-; AVX2-NEXT:    movl %eax, %r11d
-; AVX2-NEXT:    vmovd %eax, %xmm1
-; AVX2-NEXT:    shrl $8, %eax
-; AVX2-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    shrl $16, %r11d
-; AVX2-NEXT:    vpinsrb $2, %r11d, %xmm1, %xmm1
-; AVX2-NEXT:    shrl $24, %r10d
-; AVX2-NEXT:    vpinsrb $3, %r10d, %xmm1, %xmm1
-; AVX2-NEXT:    shrq $32, %r9
-; AVX2-NEXT:    vpinsrb $4, %r9d, %xmm1, %xmm1
-; AVX2-NEXT:    shrq $40, %r8
-; AVX2-NEXT:    vpinsrb $5, %r8d, %xmm1, %xmm1
-; AVX2-NEXT:    shrq $48, %rdi
-; AVX2-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    shrq $56, %rsi
-; AVX2-NEXT:    vpinsrb $7, %esi, %xmm1, %xmm0
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $8, %esi
-; AVX2-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $9, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $16, %esi
-; AVX2-NEXT:    vpinsrb $10, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $24, %esi
-; AVX2-NEXT:    vpinsrb $11, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $32, %rsi
-; AVX2-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $40, %rsi
-; AVX2-NEXT:    vpinsrb $13, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $48, %rsi
-; AVX2-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    shrq $56, %rax
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovaps 32(%rdx), %ymm1
 ; AVX2-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovaps %ymm1, 32(%rcx)
@@ -5074,52 +4986,8 @@ define void @vec384_v3i128_to_v1i384_factor3(ptr %in.vec.base.ptr, ptr %in.vec.b
 ;
 ; AVX512F-LABEL: vec384_v3i128_to_v1i384_factor3:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
-; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    movq %rax, %rsi
-; AVX512F-NEXT:    movq %rax, %rdi
-; AVX512F-NEXT:    movq %rax, %r8
-; AVX512F-NEXT:    movq %rax, %r9
-; AVX512F-NEXT:    movl %eax, %r10d
-; AVX512F-NEXT:    movl %eax, %r11d
-; AVX512F-NEXT:    vmovd %eax, %xmm1
-; AVX512F-NEXT:    shrl $8, %eax
-; AVX512F-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    shrl $16, %r11d
-; AVX512F-NEXT:    vpinsrb $2, %r11d, %xmm1, %xmm1
-; AVX512F-NEXT:    shrl $24, %r10d
-; AVX512F-NEXT:    vpinsrb $3, %r10d, %xmm1, %xmm1
-; AVX512F-NEXT:    shrq $32, %r9
-; AVX512F-NEXT:    vpinsrb $4, %r9d, %xmm1, %xmm1
-; AVX512F-NEXT:    shrq $40, %r8
-; AVX512F-NEXT:    vpinsrb $5, %r8d, %xmm1, %xmm1
-; AVX512F-NEXT:    shrq $48, %rdi
-; AVX512F-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    shrq $56, %rsi
-; AVX512F-NEXT:    vpinsrb $7, %esi, %xmm1, %xmm0
-; AVX512F-NEXT:    movl %eax, %esi
-; AVX512F-NEXT:    shrl $8, %esi
-; AVX512F-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $9, %esi, %xmm0, %xmm0
-; AVX512F-NEXT:    movl %eax, %esi
-; AVX512F-NEXT:    shrl $16, %esi
-; AVX512F-NEXT:    vpinsrb $10, %esi, %xmm0, %xmm0
-; AVX512F-NEXT:    movl %eax, %esi
-; AVX512F-NEXT:    shrl $24, %esi
-; AVX512F-NEXT:    vpinsrb $11, %esi, %xmm0, %xmm0
-; AVX512F-NEXT:    movq %rax, %rsi
-; AVX512F-NEXT:    shrq $32, %rsi
-; AVX512F-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX512F-NEXT:    movq %rax, %rsi
-; AVX512F-NEXT:    shrq $40, %rsi
-; AVX512F-NEXT:    vpinsrb $13, %esi, %xmm0, %xmm0
-; AVX512F-NEXT:    movq %rax, %rsi
-; AVX512F-NEXT:    shrq $48, %rsi
-; AVX512F-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0
-; AVX512F-NEXT:    shrq $56, %rax
-; AVX512F-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX512F-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovaps 32(%rdx), %ymm1
 ; AVX512F-NEXT:    vmovaps %ymm1, 32(%rcx)
@@ -5131,50 +4999,6 @@ define void @vec384_v3i128_to_v1i384_factor3(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovq %xmm0, %rax
-; AVX512BW-NEXT:    movq %rax, %rsi
-; AVX512BW-NEXT:    movq %rax, %rdi
-; AVX512BW-NEXT:    movq %rax, %r8
-; AVX512BW-NEXT:    movq %rax, %r9
-; AVX512BW-NEXT:    movl %eax, %r10d
-; AVX512BW-NEXT:    movl %eax, %r11d
-; AVX512BW-NEXT:    vmovd %eax, %xmm1
-; AVX512BW-NEXT:    shrl $8, %eax
-; AVX512BW-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    shrl $16, %r11d
-; AVX512BW-NEXT:    vpinsrb $2, %r11d, %xmm1, %xmm1
-; AVX512BW-NEXT:    shrl $24, %r10d
-; AVX512BW-NEXT:    vpinsrb $3, %r10d, %xmm1, %xmm1
-; AVX512BW-NEXT:    shrq $32, %r9
-; AVX512BW-NEXT:    vpinsrb $4, %r9d, %xmm1, %xmm1
-; AVX512BW-NEXT:    shrq $40, %r8
-; AVX512BW-NEXT:    vpinsrb $5, %r8d, %xmm1, %xmm1
-; AVX512BW-NEXT:    shrq $48, %rdi
-; AVX512BW-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512BW-NEXT:    shrq $56, %rsi
-; AVX512BW-NEXT:    vpinsrb $7, %esi, %xmm1, %xmm0
-; AVX512BW-NEXT:    movl %eax, %esi
-; AVX512BW-NEXT:    shrl $8, %esi
-; AVX512BW-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $9, %esi, %xmm0, %xmm0
-; AVX512BW-NEXT:    movl %eax, %esi
-; AVX512BW-NEXT:    shrl $16, %esi
-; AVX512BW-NEXT:    vpinsrb $10, %esi, %xmm0, %xmm0
-; AVX512BW-NEXT:    movl %eax, %esi
-; AVX512BW-NEXT:    shrl $24, %esi
-; AVX512BW-NEXT:    vpinsrb $11, %esi, %xmm0, %xmm0
-; AVX512BW-NEXT:    movq %rax, %rsi
-; AVX512BW-NEXT:    shrq $32, %rsi
-; AVX512BW-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX512BW-NEXT:    movq %rax, %rsi
-; AVX512BW-NEXT:    shrq $40, %rsi
-; AVX512BW-NEXT:    vpinsrb $13, %esi, %xmm0, %xmm0
-; AVX512BW-NEXT:    movq %rax, %rsi
-; AVX512BW-NEXT:    shrq $48, %rsi
-; AVX512BW-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0
-; AVX512BW-NEXT:    shrq $56, %rax
-; AVX512BW-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -7089,96 +6913,8 @@ define void @vec512_v4i128_to_v2i256_factor2(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX-NEXT:    vpaddb 16(%rsi), %xmm1, %xmm1
-; AVX-NEXT:    vmovq %xmm1, %rax
 ; AVX-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    movq %rax, %rdi
-; AVX-NEXT:    movq %rax, %r8
-; AVX-NEXT:    movq %rax, %r9
-; AVX-NEXT:    movl %eax, %r10d
-; AVX-NEXT:    movl %eax, %r11d
-; AVX-NEXT:    vmovd %eax, %xmm2
-; AVX-NEXT:    shrl $8, %eax
-; AVX-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX-NEXT:    shrl $16, %r11d
-; AVX-NEXT:    vpinsrb $2, %r11d, %xmm2, %xmm2
-; AVX-NEXT:    shrl $24, %r10d
-; AVX-NEXT:    vpinsrb $3, %r10d, %xmm2, %xmm2
-; AVX-NEXT:    shrq $32, %r9
-; AVX-NEXT:    vpinsrb $4, %r9d, %xmm2, %xmm2
-; AVX-NEXT:    shrq $40, %r8
-; AVX-NEXT:    vpinsrb $5, %r8d, %xmm2, %xmm2
-; AVX-NEXT:    shrq $48, %rdi
-; AVX-NEXT:    vpinsrb $6, %edi, %xmm2, %xmm2
-; AVX-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX-NEXT:    shrq $56, %rsi
-; AVX-NEXT:    vpinsrb $7, %esi, %xmm2, %xmm1
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $8, %esi
-; AVX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpinsrb $9, %esi, %xmm1, %xmm1
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $16, %esi
-; AVX-NEXT:    vpinsrb $10, %esi, %xmm1, %xmm1
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $24, %esi
-; AVX-NEXT:    vpinsrb $11, %esi, %xmm1, %xmm1
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $32, %rsi
-; AVX-NEXT:    vpinsrb $12, %esi, %xmm1, %xmm1
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $40, %rsi
-; AVX-NEXT:    vpinsrb $13, %esi, %xmm1, %xmm1
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $48, %rsi
-; AVX-NEXT:    vpinsrb $14, %esi, %xmm1, %xmm1
-; AVX-NEXT:    vmovq %xmm0, %rsi
-; AVX-NEXT:    shrq $56, %rax
-; AVX-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX-NEXT:    movl %esi, %eax
-; AVX-NEXT:    shrl $8, %eax
-; AVX-NEXT:    vmovd %esi, %xmm2
-; AVX-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX-NEXT:    movl %esi, %eax
-; AVX-NEXT:    shrl $16, %eax
-; AVX-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX-NEXT:    movl %esi, %eax
-; AVX-NEXT:    shrl $24, %eax
-; AVX-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX-NEXT:    movq %rsi, %rax
-; AVX-NEXT:    shrq $32, %rax
-; AVX-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX-NEXT:    movq %rsi, %rax
-; AVX-NEXT:    shrq $40, %rax
-; AVX-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX-NEXT:    movq %rsi, %rax
-; AVX-NEXT:    shrq $48, %rax
-; AVX-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    shrq $56, %rsi
-; AVX-NEXT:    vpinsrb $7, %esi, %xmm2, %xmm0
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $8, %esi
-; AVX-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $9, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $16, %esi
-; AVX-NEXT:    vpinsrb $10, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $24, %esi
-; AVX-NEXT:    vpinsrb $11, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $32, %rsi
-; AVX-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $40, %rsi
-; AVX-NEXT:    vpinsrb $13, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $48, %rsi
-; AVX-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0
-; AVX-NEXT:    shrq $56, %rax
-; AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vpaddb 16(%rsi), %xmm1, %xmm1
 ; AVX-NEXT:    vpaddb 32(%rdx), %xmm1, %xmm1
 ; AVX-NEXT:    vpaddb (%rdx), %xmm0, %xmm0
 ; AVX-NEXT:    vmovaps 16(%rdx), %xmm2
@@ -7193,99 +6929,12 @@ define void @vec512_v4i128_to_v2i256_factor2(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
-; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    movq %rax, %rdi
-; AVX2-NEXT:    movq %rax, %r8
-; AVX2-NEXT:    movq %rax, %r9
-; AVX2-NEXT:    movl %eax, %r10d
-; AVX2-NEXT:    movl %eax, %r11d
-; AVX2-NEXT:    vmovd %eax, %xmm1
-; AVX2-NEXT:    shrl $8, %eax
-; AVX2-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    shrl $16, %r11d
-; AVX2-NEXT:    vpinsrb $2, %r11d, %xmm1, %xmm1
-; AVX2-NEXT:    shrl $24, %r10d
-; AVX2-NEXT:    vpinsrb $3, %r10d, %xmm1, %xmm1
-; AVX2-NEXT:    shrq $32, %r9
-; AVX2-NEXT:    vpinsrb $4, %r9d, %xmm1, %xmm1
-; AVX2-NEXT:    shrq $40, %r8
-; AVX2-NEXT:    vpinsrb $5, %r8d, %xmm1, %xmm1
-; AVX2-NEXT:    shrq $48, %rdi
-; AVX2-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT:    shrq $56, %rsi
-; AVX2-NEXT:    vpinsrb $7, %esi, %xmm1, %xmm1
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $8, %esi
-; AVX2-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpinsrb $9, %esi, %xmm1, %xmm1
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $16, %esi
-; AVX2-NEXT:    vpinsrb $10, %esi, %xmm1, %xmm1
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $24, %esi
-; AVX2-NEXT:    vpinsrb $11, %esi, %xmm1, %xmm1
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $32, %rsi
-; AVX2-NEXT:    vpinsrb $12, %esi, %xmm1, %xmm1
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $40, %rsi
-; AVX2-NEXT:    vpinsrb $13, %esi, %xmm1, %xmm1
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $48, %rsi
-; AVX2-NEXT:    vpinsrb $14, %esi, %xmm1, %xmm1
-; AVX2-NEXT:    vmovq %xmm0, %rsi
-; AVX2-NEXT:    shrq $56, %rax
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    movl %esi, %eax
-; AVX2-NEXT:    shrl $8, %eax
-; AVX2-NEXT:    vmovd %esi, %xmm2
-; AVX2-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    movl %esi, %eax
-; AVX2-NEXT:    shrl $16, %eax
-; AVX2-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    movl %esi, %eax
-; AVX2-NEXT:    shrl $24, %eax
-; AVX2-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    movq %rsi, %rax
-; AVX2-NEXT:    shrq $32, %rax
-; AVX2-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    movq %rsi, %rax
-; AVX2-NEXT:    shrq $40, %rax
-; AVX2-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    movq %rsi, %rax
-; AVX2-NEXT:    shrq $48, %rax
-; AVX2-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    shrq $56, %rsi
-; AVX2-NEXT:    vpinsrb $7, %esi, %xmm2, %xmm0
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $8, %esi
-; AVX2-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $9, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $16, %esi
-; AVX2-NEXT:    vpinsrb $10, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $24, %esi
-; AVX2-NEXT:    vpinsrb $11, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $32, %rsi
-; AVX2-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $40, %rsi
-; AVX2-NEXT:    vpinsrb $13, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $48, %rsi
-; AVX2-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    shrq $56, %rax
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
-; AVX2-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
-; AVX2-NEXT:    vmovdqa %ymm1, (%rcx)
-; AVX2-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vmovdqa %xmm0, %xmm0
+; AVX2-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
+; AVX2-NEXT:    vmovdqa %ymm1, 32(%rcx)
+; AVX2-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -7346,50 +6995,6 @@ define void @vec512_v4i128_to_v1i512_factor4(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX-NEXT:    vmovq %xmm0, %rax
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    movq %rax, %rdi
-; AVX-NEXT:    movq %rax, %r8
-; AVX-NEXT:    movq %rax, %r9
-; AVX-NEXT:    movl %eax, %r10d
-; AVX-NEXT:    movl %eax, %r11d
-; AVX-NEXT:    vmovd %eax, %xmm1
-; AVX-NEXT:    shrl $8, %eax
-; AVX-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    shrl $16, %r11d
-; AVX-NEXT:    vpinsrb $2, %r11d, %xmm1, %xmm1
-; AVX-NEXT:    shrl $24, %r10d
-; AVX-NEXT:    vpinsrb $3, %r10d, %xmm1, %xmm1
-; AVX-NEXT:    shrq $32, %r9
-; AVX-NEXT:    vpinsrb $4, %r9d, %xmm1, %xmm1
-; AVX-NEXT:    shrq $40, %r8
-; AVX-NEXT:    vpinsrb $5, %r8d, %xmm1, %xmm1
-; AVX-NEXT:    shrq $48, %rdi
-; AVX-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    shrq $56, %rsi
-; AVX-NEXT:    vpinsrb $7, %esi, %xmm1, %xmm0
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $8, %esi
-; AVX-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $9, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $16, %esi
-; AVX-NEXT:    vpinsrb $10, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movl %eax, %esi
-; AVX-NEXT:    shrl $24, %esi
-; AVX-NEXT:    vpinsrb $11, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $32, %rsi
-; AVX-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $40, %rsi
-; AVX-NEXT:    vpinsrb $13, %esi, %xmm0, %xmm0
-; AVX-NEXT:    movq %rax, %rsi
-; AVX-NEXT:    shrq $48, %rsi
-; AVX-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0
-; AVX-NEXT:    shrq $56, %rax
-; AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
 ; AVX-NEXT:    vmovaps 32(%rdx), %ymm1
 ; AVX-NEXT:    vpaddb (%rdx), %xmm0, %xmm0
 ; AVX-NEXT:    vmovaps 16(%rdx), %xmm2
@@ -7403,50 +7008,6 @@ define void @vec512_v4i128_to_v1i512_factor4(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    movq %rax, %rdi
-; AVX2-NEXT:    movq %rax, %r8
-; AVX2-NEXT:    movq %rax, %r9
-; AVX2-NEXT:    movl %eax, %r10d
-; AVX2-NEXT:    movl %eax, %r11d
-; AVX2-NEXT:    vmovd %eax, %xmm1
-; AVX2-NEXT:    shrl $8, %eax
-; AVX2-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    shrl $16, %r11d
-; AVX2-NEXT:    vpinsrb $2, %r11d, %xmm1, %xmm1
-; AVX2-NEXT:    shrl $24, %r10d
-; AVX2-NEXT:    vpinsrb $3, %r10d, %xmm1, %xmm1
-; AVX2-NEXT:    shrq $32, %r9
-; AVX2-NEXT:    vpinsrb $4, %r9d, %xmm1, %xmm1
-; AVX2-NEXT:    shrq $40, %r8
-; AVX2-NEXT:    vpinsrb $5, %r8d, %xmm1, %xmm1
-; AVX2-NEXT:    shrq $48, %rdi
-; AVX2-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    shrq $56, %rsi
-; AVX2-NEXT:    vpinsrb $7, %esi, %xmm1, %xmm0
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $8, %esi
-; AVX2-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $9, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $16, %esi
-; AVX2-NEXT:    vpinsrb $10, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movl %eax, %esi
-; AVX2-NEXT:    shrl $24, %esi
-; AVX2-NEXT:    vpinsrb $11, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $32, %rsi
-; AVX2-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $40, %rsi
-; AVX2-NEXT:    vpinsrb $13, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    shrq $48, %rsi
-; AVX2-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    shrq $56, %rax
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovaps 32(%rdx), %ymm1
 ; AVX2-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovaps %ymm1, 32(%rcx)

